Mirror of https://github.com/Icinga/icingabeat.git, synced 2025-07-27 07:44:02 +02:00

Update libbeat to v5.1.2

commit a9bb4a709c (parent 1ea21190d2)
@@ -1,2 +1,3 @@
 :stack-version: 5.1.1
 :doc-branch: 5.1
+:go-version: 1.7.1
@@ -7,7 +7,7 @@
      }
    },
    "_meta": {
-      "version": "5.1.1"
+      "version": "5.1.2"
    },
    "dynamic_templates": [
      {
@@ -5,7 +5,7 @@
      "norms": false
    },
    "_meta": {
-      "version": "5.1.1"
+      "version": "5.1.2"
    },
    "dynamic_templates": [
      {
vendor/github.com/elastic/beats/CHANGELOG.asciidoc (18 changes, generated, vendored)

@@ -8,7 +8,7 @@
 // Template, add newest changes here

 === Beats version HEAD
-https://github.com/elastic/beats/compare/v5.1.1...5.1[Check the HEAD diff]
+https://github.com/elastic/beats/compare/v5.1.2...5.1[Check the HEAD diff]

 ==== Breaking changes

@@ -32,6 +32,7 @@ https://github.com/elastic/beats/compare/v5.1.1...5.1[Check the HEAD diff]
 *Packetbeat*

 *Filebeat*

+- Fix registry migration issue from old states were files were only harvested after second restart. {pull}3322[3322]

 *Winlogbeat*

@@ -61,6 +62,21 @@ https://github.com/elastic/beats/compare/v5.1.1...5.1[Check the HEAD diff]

 ////////////////////////////////////////////////////////////

+[[release-notes-5.1.2]]
+=== Beats version 5.1.2
+https://github.com/elastic/beats/compare/v5.1.1...v5.1.2[View commits]
+
+==== Bugfixes
+
+*Packetbeat*
+
+- Fix error on importing dashboards due to colons in the Cassandra dashboard. {issue}3140[3140]
+- Fix error on importing dashboards due to the wrong type for the geo_point fields. {pull}3147[3147]
+
+*Winlogbeat*
+
+- Fix for "The array bounds are invalid" error when reading large events. {issue}3076[3076]
+
 [[release-notes-5.1.1]]
 === Beats version 5.1.1
 https://github.com/elastic/beats/compare/v5.0.2...v5.1.1[View commits]
vendor/github.com/elastic/beats/Makefile (4 changes, generated, vendored)

@@ -3,6 +3,7 @@ BUILD_DIR=build
 COVERAGE_DIR=${BUILD_DIR}/coverage
 BEATS=packetbeat filebeat winlogbeat metricbeat heartbeat
 PROJECTS=libbeat ${BEATS}
+PROJECTS_ENV=libbeat metricbeat
 SNAPSHOT?=yes

 # Runs complete testsuites (unit, system, integration) for all beats with coverage and race detection.
@@ -12,6 +13,9 @@ testsuite:
 	$(foreach var,$(PROJECTS),$(MAKE) -C $(var) testsuite || exit 1;)
 	#$(MAKE) -C generate test

+stop-environments:
+	$(foreach var,$(PROJECTS_ENV),$(MAKE) -C $(var) stop-environment || exit 0;)
+
 # Runs unit and system tests without coverage and race detection.
 .PHONY: test
 test:
vendor/github.com/elastic/beats/dev-tools/packer/version.yml (2 changes, generated, vendored)

@@ -1 +1 @@
-version: "5.1.1"
+version: "5.1.2"
vendor/github.com/elastic/beats/filebeat/docs/filebeat-filtering.asciidoc (88 changes, generated, vendored)

@@ -1,16 +1,32 @@
 [[filtering-and-enhancing-data]]
 == Filtering and Enhancing the Exported Data

-When your use case requires only a subset of the data exported by Filebeat or you need to add metadata, you can <<filebeat-filtering-overview,use Filebeat config options to filter the data>>, or you can <<defining-processors,define processors>>.
+Your use case might require only a subset of the data exported by Filebeat, or
+you might need to enhance the exported data (for example, by adding metadata).
+Filebeat provides a couple of options for filtering and enhancing exported
+data. You can:
+
+* <<filebeat-filtering-overview,Define filters at the prospector level>> to
+configure each prospector to include or exclude specific lines or files.
+* <<defining-processors,Define processors>> to configure global processing
+across all data exported by Filebeat.

 [float]
 [[filebeat-filtering-overview]]
-=== Filebeat Config Options for Filtering
+=== Filtering at the Prospector Level

-You can specify configuration options in the `filebeat` section of the config file to define regular expressions that
-match the lines you want to include and/or exclude from the output. The supported options are <<include-lines,`include_lines`>>, <<exclude-lines,`exclude_lines`>>, and <<exclude-files,`exclude_files`>>.
+You can specify filtering options at the prospector level to configure which
+lines or files are included or excluded in the output. This allows you to
+specify different filtering criteria for each prospector.
+
+You configure prospector-level filtering in the `filebeat.prospectors` section
+of the config file by specifying regular expressions that match the lines you
+want to include and/or exclude from the output. The supported options are
+<<include-lines,`include_lines`>>, <<exclude-lines,`exclude_lines`>>, and
+<<exclude-files,`exclude_files`>>.

-For example, you can use the `include_lines` option to export any lines that start with "ERR" or "WARN":
+For example, you can use the `include_lines` option to export any lines that
+start with "ERR" or "WARN":

 [source,yaml]
 -------------------------------------------------------------------------------------
@@ -21,9 +37,11 @@ filebeat.prospectors:
   include_lines: ["^ERR", "^WARN"]
 -------------------------------------------------------------------------------------

-The disadvantage of this approach is that you need to implement a configuration option for each filtering criteria that you need.
+The disadvantage of this approach is that you need to implement a
+configuration option for each filtering criteria that you need.

-See <<configuration-filebeat-options,Filebeat configuration options>> for more information about each option.
+See <<configuration-filebeat-options,Filebeat configuration options>> for more
+information about each option.

 [float]
 [[defining-processors]]
@@ -31,7 +49,11 @@ See <<configuration-filebeat-options,Filebeat configuration options>> for more i

 include::../../libbeat/docs/processors.asciidoc[]

-For example, the following configuration drops all the DEBUG messages.
+[float]
+[[drop-event-example]]
+==== Drop Event Example
+
+The following configuration drops all the DEBUG messages.

 [source,yaml]
 -----------------------------------------------------
@@ -53,4 +75,54 @@ processors:
       source: "test"
 ----------------

+[float]
+[[decode-json-example]]
+==== Decode JSON Example
+
+In the following example, the fields exported by Filebeat include a
+field, `inner`, whose value is a JSON object encoded as a string:
+
+[source,json]
+-----------------------------------------------------
+{ "outer": "value", "inner": "{\"data\": \"value\"}" }
+-----------------------------------------------------
+
+The following configuration decodes the inner JSON object:
+
+[source,yaml]
+-----------------------------------------------------
+filebeat.prospectors:
+- paths:
+    - input.json
+  json.keys_under_root: true
+
+processors:
+ - decode_json_fields:
+     fields: ["inner"]
+
+output.console.pretty: true
+-----------------------------------------------------
+
+The resulting output looks something like this:
+
+["source","json",subs="attributes"]
+-----------------------------------------------------
+{
+  "@timestamp": "2016-12-06T17:38:11.541Z",
+  "beat": {
+    "hostname": "host.example.com",
+    "name": "host.example.com",
+    "version": "{version}"
+  },
+  "inner": {
+    "data": "value"
+  },
+  "input_type": "log",
+  "offset": 55,
+  "outer": "value",
+  "source": "input.json",
+  "type": "log"
+}
+-----------------------------------------------------
+
 See <<configuration-processors>> for more information.
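For readers following the new Decode JSON Example above, this is roughly what `decode_json_fields` does to that event, sketched in plain Go with `encoding/json`. It is an illustration of the documented behavior, not the processor's actual implementation:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// The event as exported by Filebeat: "inner" holds a JSON-encoded string.
	event := map[string]interface{}{
		"outer": "value",
		"inner": `{"data": "value"}`,
	}

	// Decode the configured field if its value is a JSON string.
	if s, ok := event["inner"].(string); ok {
		var decoded map[string]interface{}
		if err := json.Unmarshal([]byte(s), &decoded); err == nil {
			event["inner"] = decoded // replace the string with the object
		}
	}

	out, _ := json.MarshalIndent(event, "", "  ")
	fmt.Println(string(out)) // "inner" is now a nested object
}
```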
vendor/github.com/elastic/beats/filebeat/docs/reference/configuration.asciidoc (2 changes, generated, vendored)

@@ -12,7 +12,6 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes
 * <<configuration-filebeat-options>>
 * <<configuration-global-options>>
 * <<configuration-general>>
-* <<configuration-processors>>
 * <<elasticsearch-output>>
 * <<logstash-output>>
 * <<kafka-output>>
@@ -22,6 +21,7 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes
 * <<configuration-output-ssl>>
 * <<configuration-path>>
 * <<configuration-logging>>
+* <<configuration-processors>>

 include::configuration/filebeat-options.asciidoc[]

@@ -560,11 +560,11 @@ filebeat.shutdown_timeout: 5s

 include::../../../../libbeat/docs/generalconfig.asciidoc[]

-include::../../../../libbeat/docs/processors-config.asciidoc[]
-
 include::../../../../libbeat/docs/outputconfig.asciidoc[]

 include::../../../../libbeat/docs/shared-path-config.asciidoc[]

 include::../../../../libbeat/docs/loggingconfig.asciidoc[]

+include::../../../../libbeat/docs/processors-config.asciidoc[]
+
vendor/github.com/elastic/beats/filebeat/docs/version.asciidoc (1 change, generated, vendored)

@@ -1,2 +1,3 @@
 :stack-version: 5.1.1
 :doc-branch: 5.1
+:go-version: 1.7.1
vendor/github.com/elastic/beats/filebeat/filebeat.template-es2x.json (2 changes, generated, vendored)

@@ -7,7 +7,7 @@
      }
    },
    "_meta": {
-      "version": "5.1.1"
+      "version": "5.1.2"
    },
    "dynamic_templates": [
      {
vendor/github.com/elastic/beats/filebeat/filebeat.template.json (2 changes, generated, vendored)

@@ -5,7 +5,7 @@
      "norms": false
    },
    "_meta": {
-      "version": "5.1.1"
+      "version": "5.1.2"
    },
    "dynamic_templates": [
      {
vendor/github.com/elastic/beats/filebeat/registrar/registrar.go (24 changes, generated, vendored)

@@ -118,15 +118,7 @@ func (r *Registrar) loadStates() error {
 		return fmt.Errorf("Error decoding states: %s", err)
 	}

-	// Set all states to finished and disable TTL on restart
-	// For all states covered by a prospector, TTL will be overwritten with the prospector value
-	for key, state := range states {
-		state.Finished = true
-		// Set ttl to -2 to easily spot which states are not managed by a prospector
-		state.TTL = -2
-		states[key] = state
-	}
-
+	states = resetStates(states)
 	r.states.SetStates(states)
 	logp.Info("States Loaded from registrar: %+v", len(states))

@@ -176,6 +168,7 @@ func (r *Registrar) loadAndConvertOldState(f *os.File) bool {
 	// Convert old states to new states
 	logp.Info("Old registry states found: %v", len(oldStates))
 	states := convertOldStates(oldStates)
+	states = resetStates(states)
 	r.states.SetStates(states)

 	// Rewrite registry in new format
@@ -186,6 +179,19 @@ func (r *Registrar) loadAndConvertOldState(f *os.File) bool {
 	return true
 }

+// resetStates sets all states to finished and disable TTL on restart
+// For all states covered by a prospector, TTL will be overwritten with the prospector value
+func resetStates(states []file.State) []file.State {
+
+	for key, state := range states {
+		state.Finished = true
+		// Set ttl to -2 to easily spot which states are not managed by a prospector
+		state.TTL = -2
+		states[key] = state
+	}
+	return states
+}
+
 func convertOldStates(oldStates map[string]file.State) []file.State {
 	// Convert old states to new states
 	states := []file.State{}
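The refactor above extracts the reset loop into a shared `resetStates` helper so that states converted from an old-format registry are also marked finished, which is the registry-migration fix named in the changelog. A minimal standalone Go sketch of that logic, using a simplified stand-in for libbeat's `file.State` (the real struct carries more fields):

```go
package main

import "fmt"

// State is a simplified stand-in for libbeat's file.State; the real struct
// has more fields (offset, FileStateOS, timestamp, ...).
type State struct {
	Source   string
	Finished bool
	TTL      int64
}

// resetStates mirrors the new helper: mark every state finished and set
// TTL to -2 so states not managed by a prospector are easy to spot.
func resetStates(states []State) []State {
	for i, state := range states {
		state.Finished = true
		state.TTL = -2
		states[i] = state
	}
	return states
}

func main() {
	// Both load paths (new-format registry and migrated old-format registry)
	// now run the same reset, which fixes the bug where migrated files were
	// only harvested after a second restart.
	states := []State{{Source: "logs/hello.log"}, {Source: "logs/log2.log"}}
	for _, s := range resetStates(states) {
		fmt.Printf("%s finished=%v ttl=%d\n", s.Source, s.Finished, s.TTL)
	}
}
```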
vendor/github.com/elastic/beats/filebeat/tests/system/test_migration.py (178 changes, generated, vendored, new file)

@@ -0,0 +1,178 @@
+from filebeat import BaseTest
+
+import os
+import platform
+import time
+import shutil
+import json
+import stat
+from nose.plugins.skip import Skip, SkipTest
+
+
+class Test(BaseTest):
+
+    def test_migration_non_windows(self):
+        """
+        Tests if migration from old filebeat registry to new format works
+        """
+
+        if os.name == "nt":
+            raise SkipTest
+
+        registry_file = self.working_dir + '/registry'
+
+        # Write old registry file
+        with open(registry_file, 'w') as f:
+            f.write('{"logs/hello.log":{"source":"logs/hello.log","offset":4,"FileStateOS":{"inode":30178938,"device":16777220}},"logs/log2.log":{"source":"logs/log2.log","offset":6,"FileStateOS":{"inode":30178958,"device":16777220}}}')
+
+        self.render_config_template(
+            path=os.path.abspath(self.working_dir) + "/log/input*",
+            clean_removed="false",
+            clean_inactive="0",
+        )
+
+        filebeat = self.start_beat()
+
+        self.wait_until(
+            lambda: self.log_contains("Old registry states found: 2"),
+            max_timeout=15)
+
+        self.wait_until(
+            lambda: self.log_contains("Old states converted to new states and written to registrar: 2"),
+            max_timeout=15)
+
+        filebeat.check_kill_and_wait()
+
+        # Check if content is same as above
+        assert self.get_registry_entry_by_path("logs/hello.log")["offset"] == 4
+        assert self.get_registry_entry_by_path("logs/log2.log")["offset"] == 6
+
+        # Compare first entry
+        oldJson = json.loads('{"source":"logs/hello.log","offset":4,"FileStateOS":{"inode":30178938,"device":16777220}}')
+        newJson = self.get_registry_entry_by_path("logs/hello.log")
+        del newJson["timestamp"]
+        del newJson["ttl"]
+        assert newJson == oldJson
+
+        # Compare second entry
+        oldJson = json.loads('{"source":"logs/log2.log","offset":6,"FileStateOS":{"inode":30178958,"device":16777220}}')
+        newJson = self.get_registry_entry_by_path("logs/log2.log")
+        del newJson["timestamp"]
+        del newJson["ttl"]
+        assert newJson == oldJson
+
+        # Make sure the right number of entries is in
+        data = self.get_registry()
+        assert len(data) == 2
+
+    def test_migration_windows(self):
+        """
+        Tests if migration from old filebeat registry to new format works
+        """
+
+        if os.name != "nt":
+            raise SkipTest
+
+        registry_file = self.working_dir + '/registry'
+
+        # Write old registry file
+        with open(registry_file, 'w') as f:
+            f.write('{"logs/hello.log":{"source":"logs/hello.log","offset":4,"FileStateOS":{"idxhi":1,"idxlo":12,"vol":34}},"logs/log2.log":{"source":"logs/log2.log","offset":6,"FileStateOS":{"idxhi":67,"idxlo":44,"vol":12}}}')
+
+        self.render_config_template(
+            path=os.path.abspath(self.working_dir) + "/log/input*",
+        )
+
+        filebeat = self.start_beat()
+
+        self.wait_until(
+            lambda: self.log_contains("Old registry states found: 2"),
+            max_timeout=15)
+
+        self.wait_until(
+            lambda: self.log_contains("Old states converted to new states and written to registrar: 2"),
+            max_timeout=15)
+
+        filebeat.check_kill_and_wait()
+
+        # Check if content is same as above
+        assert self.get_registry_entry_by_path("logs/hello.log")["offset"] == 4
+        assert self.get_registry_entry_by_path("logs/log2.log")["offset"] == 6
+
+        # Compare first entry
+        oldJson = json.loads('{"source":"logs/hello.log","offset":4,"FileStateOS":{"idxhi":1,"idxlo":12,"vol":34}}')
+        newJson = self.get_registry_entry_by_path("logs/hello.log")
+        del newJson["timestamp"]
+        del newJson["ttl"]
+        assert newJson == oldJson
+
+        # Compare second entry
+        oldJson = json.loads('{"source":"logs/log2.log","offset":6,"FileStateOS":{"idxhi":67,"idxlo":44,"vol":12}}')
+        newJson = self.get_registry_entry_by_path("logs/log2.log")
+        del newJson["timestamp"]
+        del newJson["ttl"]
+        assert newJson == oldJson
+
+        # Make sure the right number of entries is in
+        data = self.get_registry()
+        assert len(data) == 2
+
+    def test_migration_continue_reading(self):
+        """
+        Tests if after the migration filebeat keeps reading the file
+        """
+
+        os.mkdir(self.working_dir + "/log/")
+        testfile1 = self.working_dir + "/log/test.log"
+
+        with open(testfile1, 'w') as f:
+            f.write("entry10\n")
+
+        registry_file = self.working_dir + '/registry'
+
+        self.render_config_template(
+            path=os.path.abspath(self.working_dir) + "/log/*",
+            output_file_filename="filebeat_1",
+        )
+
+        # Run filebeat to create a registry
+        filebeat = self.start_beat(output="filebeat1.log")
+        self.wait_until(
+            lambda: self.output_has(lines=1, output_file="output/filebeat_1"),
+            max_timeout=10)
+        filebeat.check_kill_and_wait()
+
+        # Create old registry file out of the new one
+        r = self.get_registry()
+        registry_entry = r[0]
+        del registry_entry["timestamp"]
+        del registry_entry["ttl"]
+        old_registry = {registry_entry["source"]: registry_entry}
+
+        # Overwrite registry
+        with open(registry_file, 'w') as f:
+            json.dump(old_registry, f)
+
+        self.render_config_template(
+            path=os.path.abspath(self.working_dir) + "/log/*",
+            output_file_filename="filebeat_2",
+        )
+
+        filebeat = self.start_beat(output="filebeat2.log")
+
+        # Wait until state is migrated
+        self.wait_until(
+            lambda: self.log_contains(
+                "Old states converted to new states and written to registrar: 1", "filebeat2.log"),
+            max_timeout=10)
+
+        with open(testfile1, 'a') as f:
+            f.write("entry12\n")
+
+        # After restart new output file is created -> only 1 new entry
+        self.wait_until(
+            lambda: self.output_has(lines=1, output_file="output/filebeat_2"),
+            max_timeout=10)
+
+        filebeat.check_kill_and_wait()
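The migration these tests exercise turns the old registry (a JSON object keyed by source path) into the new flat list of states, as `convertOldStates` in registrar.go does. A rough Go sketch of that shape change, with simplified types (the real `file.State` also carries FileStateOS, timestamp, and TTL):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// State is a simplified stand-in for libbeat's file.State.
type State struct {
	Source string `json:"source"`
	Offset int64  `json:"offset"`
}

// convertOldStates mirrors the shape change: old registries are JSON objects
// keyed by source path, new registries are a flat list of states.
func convertOldStates(oldStates map[string]State) []State {
	states := []State{}
	for _, state := range oldStates {
		states = append(states, state)
	}
	return states
}

func main() {
	raw := `{"logs/hello.log":{"source":"logs/hello.log","offset":4},
	         "logs/log2.log":{"source":"logs/log2.log","offset":6}}`
	var old map[string]State
	if err := json.Unmarshal([]byte(raw), &old); err != nil {
		panic(err)
	}
	fmt.Println(convertOldStates(old)) // two states; map order is not guaranteed
}
```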
vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar.py (108 changes, generated, vendored)

@@ -624,114 +624,6 @@ class Test(BaseTest):
         assert self.get_registry_entry_by_path(os.path.abspath(testfile1))["offset"] == 9
         assert self.get_registry_entry_by_path(os.path.abspath(testfile2))["offset"] == 8

-
-    def test_migration_non_windows(self):
-        """
-        Tests if migration from old filebeat registry to new format works
-        """
-
-        if os.name == "nt":
-            raise SkipTest
-
-        registry_file = self.working_dir + '/registry'
-
-        # Write old registry file
-        with open(registry_file, 'w') as f:
-            f.write('{"logs/hello.log":{"source":"logs/hello.log","offset":4,"FileStateOS":{"inode":30178938,"device":16777220}},"logs/log2.log":{"source":"logs/log2.log","offset":6,"FileStateOS":{"inode":30178958,"device":16777220}}}')
-
-        self.render_config_template(
-            path=os.path.abspath(self.working_dir) + "/log/input*",
-            clean_removed="false",
-            clean_inactive="0",
-        )
-
-        filebeat = self.start_beat()
-
-        self.wait_until(
-            lambda: self.log_contains("Old registry states found: 2"),
-            max_timeout=15)
-
-        self.wait_until(
-            lambda: self.log_contains("Old states converted to new states and written to registrar: 2"),
-            max_timeout=15)
-
-        filebeat.check_kill_and_wait()
-
-        # Check if content is same as above
-        assert self.get_registry_entry_by_path("logs/hello.log")["offset"] == 4
-        assert self.get_registry_entry_by_path("logs/log2.log")["offset"] == 6
-
-        # Compare first entry
-        oldJson = json.loads('{"source":"logs/hello.log","offset":4,"FileStateOS":{"inode":30178938,"device":16777220}}')
-        newJson = self.get_registry_entry_by_path("logs/hello.log")
-        del newJson["timestamp"]
-        del newJson["ttl"]
-        assert newJson == oldJson
-
-        # Compare second entry
-        oldJson = json.loads('{"source":"logs/log2.log","offset":6,"FileStateOS":{"inode":30178958,"device":16777220}}')
-        newJson = self.get_registry_entry_by_path("logs/log2.log")
-        del newJson["timestamp"]
-        del newJson["ttl"]
-        assert newJson == oldJson
-
-        # Make sure the right number of entries is in
-        data = self.get_registry()
-        assert len(data) == 2
-
-    def test_migration_windows(self):
-        """
-        Tests if migration from old filebeat registry to new format works
-        """
-
-        if os.name != "nt":
-            raise SkipTest
-
-        registry_file = self.working_dir + '/registry'
-
-        # Write old registry file
-        with open(registry_file, 'w') as f:
-            f.write('{"logs/hello.log":{"source":"logs/hello.log","offset":4,"FileStateOS":{"idxhi":1,"idxlo":12,"vol":34}},"logs/log2.log":{"source":"logs/log2.log","offset":6,"FileStateOS":{"idxhi":67,"idxlo":44,"vol":12}}}')
-
-        self.render_config_template(
-            path=os.path.abspath(self.working_dir) + "/log/input*",
-        )
-
-        filebeat = self.start_beat()
-
-        self.wait_until(
-            lambda: self.log_contains("Old registry states found: 2"),
-            max_timeout=15)
-
-        self.wait_until(
-            lambda: self.log_contains("Old states converted to new states and written to registrar: 2"),
-            max_timeout=15)
-
-        filebeat.check_kill_and_wait()
-
-        # Check if content is same as above
-        assert self.get_registry_entry_by_path("logs/hello.log")["offset"] == 4
-        assert self.get_registry_entry_by_path("logs/log2.log")["offset"] == 6
-
-        # Compare first entry
-        oldJson = json.loads('{"source":"logs/hello.log","offset":4,"FileStateOS":{"idxhi":1,"idxlo":12,"vol":34}}')
-        newJson = self.get_registry_entry_by_path("logs/hello.log")
-        del newJson["timestamp"]
-        del newJson["ttl"]
-        assert newJson == oldJson
-
-        # Compare second entry
-        oldJson = json.loads('{"source":"logs/log2.log","offset":6,"FileStateOS":{"idxhi":67,"idxlo":44,"vol":12}}')
-        newJson = self.get_registry_entry_by_path("logs/log2.log")
-        del newJson["timestamp"]
-        del newJson["ttl"]
-        assert newJson == oldJson
-
-        # Make sure the right number of entries is in
-        data = self.get_registry()
-        assert len(data) == 2
-
-
     def test_clean_inactive(self):
         """
         Checks that states are properly removed after clean_inactive
vendor/github.com/elastic/beats/heartbeat/docs/version.asciidoc (1 change, generated, vendored)

@@ -1,2 +1,3 @@
 :stack-version: 5.1.1
 :doc-branch: 5.1
+:go-version: 1.7.1
vendor/github.com/elastic/beats/heartbeat/heartbeat.template-es2x.json (2 changes, generated, vendored)

@@ -7,7 +7,7 @@
      }
    },
    "_meta": {
-      "version": "5.1.1"
+      "version": "5.1.2"
    },
    "dynamic_templates": [
      {
vendor/github.com/elastic/beats/heartbeat/heartbeat.template.json (2 changes, generated, vendored)

@@ -5,7 +5,7 @@
      "norms": false
    },
    "_meta": {
-      "version": "5.1.1"
+      "version": "5.1.2"
    },
    "dynamic_templates": [
      {
vendor/github.com/elastic/beats/libbeat/beat/version.go (2 changes, generated, vendored)

@@ -1,3 +1,3 @@
 package beat

-const defaultBeatVersion = "5.1.1"
+const defaultBeatVersion = "5.1.2"
vendor/github.com/elastic/beats/libbeat/docs/communitybeats.asciidoc (4 changes, generated, vendored)

@@ -7,9 +7,11 @@ out some of them here.
 NOTE: Elastic provides no warranty or support for community-sourced Beats.

 [horizontal]
+https://github.com/awormuth/amazonbeat[amazonbeat]:: Reads data from a specified Amazon product.
 https://github.com/radoondas/apachebeat[apachebeat]:: Reads status from Apache HTTPD server-status.
 https://github.com/goomzee/burrowbeat[burrowbeat]:: Monitors Kafka consumer lag using Burrow.
 https://github.com/goomzee/cassandrabeat[cassandrabeat]:: Uses Cassandra's nodetool cfstats utility to monitor Cassandra database nodes and lag.
+https://github.com/hartfordfive/cloudflarebeat[cloudflarebeat]:: Indexes log entries from the Cloudflare Enterprise Log Share API.
 https://github.com/aidan-/cloudtrailbeat[cloudtrailbeat]:: Reads events from Amazon Web Services' https://aws.amazon.com/cloudtrail/[CloudTrail].
 https://github.com/Pravoru/consulbeat[consulbeat]:: Reads services health checks from consul and pushes them to elastic.
 https://github.com/Ingensi/dockbeat[dockbeat]:: Reads Docker container
@@ -27,6 +29,7 @@ https://github.com/radoondas/jmxproxybeat[jmxproxybeat]:: Reads Tomcat JMX metri
 https://github.com/mheese/journalbeat[journalbeat]:: Used for log shipping from systemd/journald based Linux systems.
 https://github.com/eskibars/lmsensorsbeat[lmsensorsbeat]:: Collects data from lm-sensors (such as CPU temperatures, fan speeds, and voltages from i2c and smbus).
 https://github.com/consulthys/logstashbeat[logstashbeat]:: Collects data from Logstash monitoring API (v5 onwards) and indexes them in Elasticsearch.
+https://github.com/scottcrespo/mongobeat[mongobeat]:: Monitors MongoDB instances and can be configured to send multiple document types to Elasticsearch.
 https://github.com/adibendahan/mysqlbeat[mysqlbeat]:: Run any query on MySQL and send results to Elasticsearch.
 https://github.com/PhaedrusTheGreek/nagioscheckbeat[nagioscheckbeat]:: For Nagios checks and performance data.
 https://github.com/mrkschan/nginxbeat[nginxbeat]:: Reads status from Nginx.
@@ -44,6 +47,7 @@ https://github.com/martinhoefling/saltbeat[saltbeat]:: Reads events from salt ma
 https://github.com/consulthys/springbeat[springbeat]:: Collects health and metrics data from Spring Boot applications running with the actuator module.
 https://github.com/buehler/go-elastic-twitterbeat[twitterbeat]:: Reads tweets for specified screen names.
 https://github.com/gravitational/udpbeat[udpbeat]:: Ships structured logs via UDP.
+https://github.com/hartfordfive/udplogbeat[udplogbeat]:: Accept events via local UDP socket (in plain-text or JSON with ability to enforce schemas). Can also be used for applications only supporting syslog logging.
 https://github.com/cleesmith/unifiedbeat[unifiedbeat]:: Reads records from Unified2 binary files generated by
 network intrusion detection software and indexes the records in Elasticsearch.
 https://github.com/mrkschan/uwsgibeat[uwsgibeat]:: Reads stats from uWSGI.
vendor/github.com/elastic/beats/libbeat/docs/processors-config.asciidoc (291 changes, generated, vendored)

@@ -11,22 +11,21 @@
 //////////////////////////////////////////////////////////////////////////

 [[configuration-processors]]
-=== Processors Configuration
+== Processors

 include::../../libbeat/docs/processors.asciidoc[]

-Each processor has associated an action with a set of parameters and optionally a condition. If the condition is
-present, then the action is executed only if the condition
-is fulfilled. If no condition is passed then the action is always executed.
+To define a processor, you specify the processor name, an optional condition,
+and a set of parameters:

 [source,yaml]
 ------
 processors:
- - <action>:
+ - <processor_name>:
     when:
        <condition>
     <parameters>
- - <action>:
+ - <processor_name>:
     when:
        <condition>
     <parameters>
@@ -34,21 +33,33 @@ processors:

 ------

-where <action> can be a way to select the fields that are exported or a way to add meta data to the event , <condition> contains the definition of the condition.
-and <parameters> is the list of parameters passed along the <action>.
+Where:
+
+* <processor_name> specifies a <<processors,processor>> that performs some kind
+of action, such as selecting the fields that are exported or adding metadata to
+the event.
+* <when: condition> specifies an optional <<conditions,condition>>. If the
+condition is present, then the action is executed only if the condition is
+fulfilled. If no condition is passed, then the action is always executed.
+* <parameters> is the list of parameters to pass to the processor.

 See <<filtering-and-enhancing-data>> for specific {beatname_uc} examples.

-[[filtering-condition]]
-==== Condition
+[float]
+[[conditions]]
+=== Conditions

-Each condition receives a field to compare or multiple fields under the same condition and then `AND` is used between
-them. You can see a list of the <<exported-fields,`exported fields`>>.
+Each condition receives a field to compare. You can specify multiple fields
+under the same condition by using `AND` between the fields (for example,
+`field1 AND field2`).

-For each field, you can specify a simple field name or a nested map, for example `dns.question.name`.
+For each field, you can specify a simple field name or a nested map, for
+example `dns.question.name`.
+
+See <<exported-fields>> for a list of all the fields that are exported by
+{beatname_uc}.

-A condition can be:
+The supported conditions are:

 * <<condition-equals,`equals`>>
 * <<condition-contains,`contains`>>
@@ -59,14 +70,15 @@ A condition can be:
 * <<condition-not, `not`>>


+[float]
 [[condition-equals]]
-===== equals
+==== equals

-With the `equals` condition, you can compare if a field has a certain value. The condition accepts only an integer or a string
-value.
+With the `equals` condition, you can compare if a field has a certain value.
+The condition accepts only an integer or a string value.

-For example, the following condition checks if the response code of the HTTP transaction is 200:
+For example, the following condition checks if the response code of the HTTP
+transaction is 200:

 [source,yaml]
 -------
@@ -74,13 +86,15 @@ equals:
   http.response.code: 200
 -------

+[float]
 [[condition-contains]]
-===== contains
+==== contains

 The `contains` condition checks if a value is part of a field. The field can be
 a string or an array of strings. The condition accepts only a string value.

-For example, the following condition checks if an error is part of the transaction status:
+For example, the following condition checks if an error is part of the
+transaction status:

 [source,yaml]
 ------
@@ -88,13 +102,15 @@ contains:
   status: "Specific error"
 ------

+[float]
 [[condition-regexp]]
-===== regexp
+==== regexp

-The `regexp` condition checks the field against a regular expression. The condition accepts only strings.
+The `regexp` condition checks the field against a regular expression. The
+condition accepts only strings.

-For example, the following condition checks if the process name starts with `foo`:
+For example, the following condition checks if the process name starts with
+`foo`:

 [source,yaml]
 -----
@@ -102,14 +118,16 @@ reqexp:
   system.process.name: "foo.*"
 -----

+[float]
 [[condition-range]]
-===== range
+==== range

-The `range` condition checks if the field is in a certain range of values. The condition supports `lt`, `lte`, `gt` and `gte`. The
-condition accepts only integer or float values.
+The `range` condition checks if the field is in a certain range of values. The
+condition supports `lt`, `lte`, `gt` and `gte`. The condition accepts only
+integer or float values.

-For example, the following condition checks for failed HTTP transaction by comparing the `http.response.code` field with
-400.
+For example, the following condition checks for failed HTTP transactions by
+comparing the `http.response.code` field with 400.


 [source,yaml]
@@ -119,7 +137,7 @@ range:
     gte: 400
 ------

-that can be also translated to:
+This can also be written as:

 [source,yaml]
 ----
@@ -127,7 +145,8 @@ range:
   http.response.code.gte: 400
 ----

-For example, the following condition checks if the CPU usage in percentage has a value between 0.5 and 0.8.
+The following condition checks if the CPU usage in percentage has a value
+between 0.5 and 0.8.

 [source,yaml]
 ------
@@ -137,8 +156,9 @@ range:
 ------


+[float]
 [[condition-or]]
-===== OR
+==== OR

 The `or` operator receives a list of conditions.

@@ -152,7 +172,8 @@ or:

 -------

-For example the condition `http.response.code = 304 OR http.response.code = 404` translates to:
+For example, to configure the condition
+`http.response.code = 304 OR http.response.code = 404`:

 [source,yaml]
 ------
@@ -163,9 +184,9 @@ or:
     http.response.code: 404
 ------

+[float]
 [[condition-and]]
-===== AND
+==== AND

 The `and` operator receives a list of conditions.

@@ -179,7 +200,8 @@ and:

 -------

-For example the condition `http.response.code = 200 AND status = OK` translates to:
+For example, to configure the condition
+`http.response.code = 200 AND status = OK`:

 [source,yaml]
 ------
@@ -202,8 +224,9 @@ or:

 ------

+[float]
 [[condition-not]]
-===== NOT
+==== NOT

 The `not` operator receives the condition to negate.

@@ -214,7 +237,7 @@ not:

 -------

-For example the condition `NOT status = OK` translates to:
+For example, to configure the condition `NOT status = OK`:

 [source,yaml]
 ------
@@ -223,81 +246,24 @@ not:
     status: OK
 ------

-
-==== Actions
-
-The supported actions are:
-
-* <<include-fields,`include_fields`>>
-* <<drop-fields,`drop_fields`>>
-* <<drop-event,`drop_event`>>
+[float]
+[[processors]]
+=== Processors
+
+The supported processors are:
+
 * <<add-cloud-metadata,`add_cloud_metadata`>>
+* <<decode-json-fields,`decode_json_fields`>>
+* <<drop-event,`drop_event`>>
+* <<drop-fields,`drop_fields`>>
+* <<include-fields,`include_fields`>>
-
-See <<exported-fields>> for the full list of possible fields.
-
-[[include-fields]]
-===== include_fields
-
-The `include_fields` action specifies what fields to export if a certain condition is fulfilled. The condition is
-optional and if it's missing then the defined fields are always exported. The `@timestamp` and
-`type` fields are always exported, even if they are not defined in the `include_fields` list.
-
-[source,yaml]
--------
-processors:
- - include_fields:
-     when:
-        condition
-     fields: ["field1", "field2", ...]
--------
-
-You can specify multiple `include_fields` actions under the `processors` section.
-
-NOTE: If you define an empty list of fields under `include_fields`, then only the required fields, `@timestamp` and `type`, are
-exported.
-
-[[drop-fields]]
-===== drop_fields
-
-The `drop_fields` action specifies what fields to drop if a certain condition is fulfilled. The condition is optional
-and if it's missing then the defined fields are always dropped. The `@timestamp` and `type` fields cannot be dropped,
-even if they show up in the `drop_fields` list.
-
-[source,yaml]
------------------------------------------------------
-processors:
- - drop_fields:
-     when:
-        condition
-     fields: ["field1", "field2", ...]
------------------------------------------------------
-
-NOTE: If you define an empty list of fields under `drop_fields`, then no fields are dropped.
-
-[[drop-event]]
-===== drop_event
-
-The `drop_event` action drops the entire event if the associated condition is fulfilled. The condition is mandatory, as
-without one all the events are dropped.
-
-[source,yaml]
-------
-processors:
- - drop_event:
-     when:
-        condition
-------
-
 [[add-cloud-metadata]]
-===== add_cloud_metadata
+=== add_cloud_metadata

-The `add_cloud_metadata` action enriches each event with instance metadata from
-the machine's hosting provider. At startup it will detect the hosting provider
-and cache the instance metadata.
+The `add_cloud_metadata` processor enriches each event with instance metadata
+from the machine's hosting provider. At startup it will detect the hosting
+provider and cache the instance metadata.

 Three cloud providers are supported.

@@ -308,14 +274,16 @@ Three cloud providers are supported.
 The simple configuration below enables the processor.

 [source,yaml]
---------------------------------------------------------------------------------
+-------------------------------------------------------------------------------
 processors:
 - add_cloud_metadata:
---------------------------------------------------------------------------------
+-------------------------------------------------------------------------------

-The `add_cloud_metadata` action has one optional configuration setting named
+The `add_cloud_metadata` processor has one optional configuration setting named
 `timeout` that specifies the maximum amount of time to wait for a successful
-response when detecting the hosting provider. The default timeout value is `3s`.
+response when detecting the hosting provider. The default timeout value is
+`3s`.

 If a timeout occurs then no instance metadata will be added to the events. This
 makes it possible to enable this processor for all your deployments (in the
 cloud or on-premise).
@@ -326,7 +294,7 @@ examples for each of the supported providers.
 _EC2_

 [source,json]
---------------------------------------------------------------------------------
+-------------------------------------------------------------------------------
 {
   "meta": {
     "cloud": {
@@ -338,12 +306,12 @@ _EC2_
     }
   }
 }
---------------------------------------------------------------------------------
+-------------------------------------------------------------------------------

 _Digital Ocean_

 [source,json]
---------------------------------------------------------------------------------
+-------------------------------------------------------------------------------
 {
   "meta": {
     "cloud": {
@@ -353,12 +321,12 @@ _Digital Ocean_
     }
   }
 }
---------------------------------------------------------------------------------
+-------------------------------------------------------------------------------

 _GCE_

 [source,json]
---------------------------------------------------------------------------------
+-------------------------------------------------------------------------------
 {
   "meta": {
     "cloud": {
@@ -370,4 +338,91 @@ _GCE_
     }
   }
 }
---------------------------------------------------------------------------------
+-------------------------------------------------------------------------------
+
+[[decode-json-fields]]
+=== decode_json_fields
+
+The `decode_json_fields` processor decodes fields containing JSON strings and
+replaces the strings with valid JSON objects.
+
+[source,yaml]
+-----------------------------------------------------
+processors:
+ - decode_json_fields:
+     fields: ["field1", "field2", ...]
+     process_array: false
+     max_depth: 1
+-----------------------------------------------------
+
+The `decode_json_fields` processor has the following configuration settings:
+
+`fields`:: The fields containing JSON strings to decode.
+`process_array`:: (Optional) A boolean that specifies whether to process
+arrays. The default is false.
+`max_depth`:: (Optional) The maximum parsing depth. The default is 1.
+
+[[drop-event]]
+=== drop_event
+
+The `drop_event` processor drops the entire event if the associated condition
+is fulfilled. The condition is mandatory, because without one, all the events
+are dropped.
+
+[source,yaml]
+------
+processors:
+ - drop_event:
+     when:
+        condition
+------
+
+See <<conditions>> for a list of supported conditions.
+
+[[drop-fields]]
+=== drop_fields
+
+The `drop_fields` processor specifies which fields to drop if a certain
+condition is fulfilled. The condition is optional. If it's missing, the
+specified fields are always dropped. The `@timestamp` and `type` fields cannot
+be dropped, even if they show up in the `drop_fields` list.
+
+[source,yaml]
+-----------------------------------------------------
+processors:
+ - drop_fields:
+     when:
+        condition
+     fields: ["field1", "field2", ...]
+-----------------------------------------------------
+
+See <<conditions>> for a list of supported conditions.
+
+NOTE: If you define an empty list of fields under `drop_fields`, then no fields
+are dropped.
+
+[[include-fields]]
+=== include_fields
+
+The `include_fields` processor specifies which fields to export if a certain
+condition is fulfilled. The condition is optional. If it's missing, the
+specified fields are always exported. The `@timestamp` and `type` fields are
+always exported, even if they are not defined in the `include_fields` list.
+
+[source,yaml]
+-------
+processors:
+ - include_fields:
+     when:
+        condition
+     fields: ["field1", "field2", ...]
+-------
+
+See <<conditions>> for a list of supported conditions.
+
+You can specify multiple `include_fields` processors under the `processors`
+section.
+
+NOTE: If you define an empty list of fields under `include_fields`, then only
+the required fields, `@timestamp` and `type`, are exported.
+
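As a supplement to the YAML examples in the rewritten docs above, the condition semantics (equals, range, and the or/not combinators) can be sketched as composable Go predicates over a flat event map. This is illustrative only, not libbeat's actual condition engine:

```go
package main

import "fmt"

// Event is a flat event map; Cond is a condition as a predicate over it.
type Event map[string]interface{}
type Cond func(Event) bool

// Equals compares a field against a single value (string or number).
func Equals(field string, want interface{}) Cond {
	return func(e Event) bool { return e[field] == want }
}

// GTE implements the "gte" half of a range condition on a numeric field.
func GTE(field string, min float64) Cond {
	return func(e Event) bool {
		v, ok := e[field].(float64)
		return ok && v >= min
	}
}

// Or and Not mirror the or/not combinators.
func Or(conds ...Cond) Cond {
	return func(e Event) bool {
		for _, c := range conds {
			if c(e) {
				return true
			}
		}
		return false
	}
}

func Not(c Cond) Cond { return func(e Event) bool { return !c(e) } }

func main() {
	e := Event{"http.response.code": float64(404), "status": "OK"}

	failed := GTE("http.response.code", 400) // range: http.response.code.gte: 400
	redirectOrMissing := Or(
		Equals("http.response.code", float64(304)),
		Equals("http.response.code", float64(404)),
	)

	fmt.Println(failed(e))                      // true
	fmt.Println(redirectOrMissing(e))           // true
	fmt.Println(Not(Equals("status", "OK"))(e)) // false
}
```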
vendor/github.com/elastic/beats/libbeat/docs/processors.asciidoc (17 changes, generated, vendored)

@@ -9,15 +9,18 @@
 //// include::../../libbeat/docs/filtering.asciidoc[]
 //////////////////////////////////////////////////////////////////////////

-You can define processors in your configuration to process events before they are sent to the configured output.
-The libbeat library provides processors for reducing the number of exported fields, and processors for
-enhancing events with additional metadata. Each processor receives an event, applies a defined action to the event,
-and returns the event. If you define a list of processors, they are executed in the order they are defined in the
-configuration file.
+You can define processors in your configuration to process events before they
+are sent to the configured output.The libbeat library provides processors for:
+
+* reducing the number of exported fields
+* enhancing events with additional metadata
+* performing additional processing and decoding
+
+Each processor receives an event, applies a defined action to the event, and
+returns the event. If you define a list of processors, they are executed in the
+order they are defined in the {beatname_uc} configuration file.

 [source,yaml]
 -------
 event -> processor 1 -> event1 -> processor 2 -> event2 ...
 -------

-The processors are defined in the {beatname_uc} configuration file.
1 vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc generated vendored
@@ -6,6 +6,7 @@
 --
 This section summarizes the changes in each release.
 
+* <<release-notes-5.1.2>>
 * <<release-notes-5.1.1>>
 * <<release-notes-5.1.0>>
 * <<release-notes-5.0.1>>

4 vendor/github.com/elastic/beats/libbeat/docs/repositories.asciidoc generated vendored
@@ -34,7 +34,7 @@ sudo apt-get install apt-transport-https
 +
 ["source","sh",subs="attributes,callouts"]
 --------------------------------------------------
-echo "deb https://artifacts.elastic.co/packages/5.x-prerelease/apt stable main" | sudo tee -a /etc/apt/sources.list.d/elastic-5.x.list
+echo "deb https://artifacts.elastic.co/packages/5.x/apt stable main" | sudo tee -a /etc/apt/sources.list.d/elastic-5.x.list
 --------------------------------------------------
 +
 [WARNING]
@@ -85,7 +85,7 @@ your `/etc/yum.repos.d/` directory and add the following lines:
 --------------------------------------------------
 [elastic-5.x]
 name=Elastic repository for 5.x packages
-baseurl=https://artifacts.elastic.co/packages/5.x-prerelease/yum
+baseurl=https://artifacts.elastic.co/packages/5.x/yum
 gpgcheck=1
 gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
 enabled=1

1 vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc generated vendored
@@ -1,2 +1,3 @@
 :stack-version: 5.1.1
 :doc-branch: 5.1
+:go-version: 1.7.1

5 vendor/github.com/elastic/beats/libbeat/scripts/Makefile generated vendored
@@ -43,7 +43,8 @@ TEST_ENVIRONMENT?=false
 SYSTEM_TESTS?=false
 GOX_OS?=linux darwin windows solaris freebsd netbsd openbsd
 TESTING_ENVIRONMENT?=snapshot
-DOCKER_COMPOSE?=docker-compose -f ${PWD}/../testing/environments/base.yml -f ${PWD}/../testing/environments/${TESTING_ENVIRONMENT}.yml -f docker-compose.yml
+DOCKER_COMPOSE_PROJECT_NAME?=${BEATNAME}_${TESTING_ENVIRONMENT}
+DOCKER_COMPOSE?=docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} -f ${PWD}/../testing/environments/base.yml -f ${PWD}/../testing/environments/${TESTING_ENVIRONMENT}.yml -f docker-compose.yml
 DOCKER_CACHE?=1 # If set to 0, all docker images are created without cache
 GOPACKAGES_COMMA_SEP=$(subst $(space),$(comma),$(strip ${GOPACKAGES}))
 PYTHON_ENV?=${BUILD_DIR}/python-env
@@ -304,7 +305,7 @@ start-environment: stop-environment
 .PHONY: stop-environment
 stop-environment:
 	-${DOCKER_COMPOSE} stop
-	-${DOCKER_COMPOSE} rm -f -v -a
+	-${DOCKER_COMPOSE} rm -f -v
 
 .PHONY: write-environment
 write-environment:

2 vendor/github.com/elastic/beats/libbeat/scripts/generate_index_pattern.py generated vendored
@@ -60,6 +60,8 @@ def field_to_json(desc, path, output):
         field["aggregatable"] = False
     elif desc["type"] == "date":
         field["type"] = "date"
+    elif desc["type"] == "geo_point":
+        field["type"] = "geo_point"
     else:
         field["type"] = "string"

2 vendor/github.com/elastic/beats/metricbeat/_meta/beat.full.yml generated vendored
@@ -67,7 +67,7 @@ metricbeat.modules:
   # Password of hosts. Empty by default
   #password: test123
 
-#------------------------------- docker Module -------------------------------
+#------------------------------- Docker Module -------------------------------
 #- module: docker
   #metricsets: ["cpu", "info", "memory", "network", "diskio", "container"]
   #hosts: ["unix:///var/run/docker.sock"]

36 vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc generated vendored
@@ -506,7 +506,7 @@ The document type. Always set to "metricsets".
 
 [[exported-fields-docker]]
-== docker Fields
+== Docker Fields
 
 experimental[]
 Docker stats collected from Docker.
@@ -516,14 +516,14 @@ Docker stats collected from Docker.
 [float]
 == docker Fields
 
-docker contains different informations and statistics of docker's containers running
+Information and statistics about docker's running containers.
 
 [float]
 == container Fields
 
-Docker container metrics
+Docker container metrics.
 
@@ -532,7 +532,7 @@ Docker container metrics
 
 type: keyword
 
-Executed command in docker container.
+Command that was executed in the Docker container.
 
 [float]
@@ -540,7 +540,7 @@ Executed command in docker container.
 
 type: date
 
-Date then the container was created.
+Date when the container was created.
 
 [float]
@@ -595,13 +595,13 @@ Total size of all the files in the container.
 
 type: long
 
-Size of the files which have been created or changed since creation.
+Size of the files that have been created or changed since creation.
 
 [float]
 == cpu Fields
 
-Runtime cpu metrics.
+Runtime CPU metrics.
 
@@ -612,7 +612,7 @@ type: scaled_float
 
 format: percentage
 
-The system kernel consumed by The Docker server.
+The system kernel consumed by the Docker server.
 
 [float]
@@ -620,7 +620,7 @@ The system kernel consumed by The Docker server.
 
 type: long
 
-Cpu kernel tikcs
+CPU kernel ticks.
 
 [float]
@@ -637,7 +637,7 @@ format: percentage
 
 type: long
 
-Cpu system tikcs
+CPU system ticks.
 
 [float]
@@ -654,7 +654,7 @@ format: percentage
 
 type: long
 
-Cpu user tikcs
+CPU user ticks
 
 [float]
@@ -664,13 +664,13 @@ type: scaled_float
 
 format: percentage
 
-Total cpu usage.
+Total CPU usage.
 
 [float]
 == diskio Fields
 
-Diskio metrics.
+Disk I/O metrics.
 
@@ -695,14 +695,14 @@ Number of writes.
 
 type: scaled_float
 
-Reads and writes numbers combined.
+Number of reads and writes combined.
 
 [float]
 == info Fields
 
 experimental[]
-info metrics based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/display-system-wide-information
+Info metrics based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/display-system-wide-information.
 
@@ -750,7 +750,7 @@ Total number of existing containers.
 
 type: keyword
 
-Unique docker host identifier.
+Unique Docker host identifier.
 
 [float]
@@ -789,7 +789,7 @@ Memory limit.
 [float]
 == rss Fields
 
-Rss memory stats.
+RSS memory stats.
 
@@ -849,7 +849,7 @@ Total memory usage.
 [float]
 == network Fields
 
-Netowrk metrics.
+Network metrics.

38 vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-filtering.asciidoc generated vendored
@@ -1,44 +1,10 @@
 [[filtering-and-enhancing-data]]
 == Filtering and Enhancing the Exported Data
 
-When your use case requires only a subset of the data exported by Metricbeat or you need to add metadata,
-you can <<metricbeat-filtering-overview,use Metricbeat config options to filter
-the data>>, or you can <<defining-processors,define processors>>.
-
-[float]
-[[metricbeat-filtering-overview]]
-=== Metricbeat Module Filtering
-
-Each module accepts a list of filters in its configuration. These filters are
-applied to the data generated by the module. These filters are applied to the
-module data prior to the addition of the common beat fields like `beat.hostname`
-and `type` so they can only be used to filter fields from the module.
-
-The following example reduces the exported fields of the Redis module to
-include only the `redis.info.memory` fields.
-
-[source,yaml]
-----
-metricbeat.modules:
-- module: redis
-  metricsets: ["info"]
-  period: 1s
-  hosts: ["127.0.0.1:6379"]
-  enabled: true
-  filters:
-  - include_fields:
-      fields: ['memory']
-----
-
-[float]
-[[defining-processors]]
-=== Defining Processors
-
 include::../../libbeat/docs/processors.asciidoc[]
 
-For example, the following filters configuration reduces the exported fields by
-dropping the `beat.name` and `beat.hostname` fields under `beat`, from all documents.
+For example, the following configuration reduces the exported fields by
+dropping the `beat.name` and `beat.hostname` fields under `beat` from all documents.
 
 [source, yaml]
 ----

25 vendor/github.com/elastic/beats/metricbeat/docs/modules.asciidoc generated vendored
@@ -7,28 +7,7 @@ This section contains detailed information about the metric collecting modules
 contained in {beatname_uc}. Each module contains one or multiple metricsets. More details
 about each module can be found under the links below.
 
-* <<metricbeat-module-apache,Apache>>
-* <<metricbeat-module-docker,Docker>>
-* <<metricbeat-module-haproxy,HAProxy>>
-* <<metricbeat-module-mongodb,MongoDB>>
-* <<metricbeat-module-mysql,MySQL>>
-* <<metricbeat-module-nginx,Nginx>>
-* <<metricbeat-module-redis,Redis>>
-* <<metricbeat-module-postgresql,Postgresql>>
-* <<metricbeat-module-system,System>>
-* <<metricbeat-module-zookeeper,ZooKeeper>>
-
---
-
-include::modules/apache.asciidoc[]
-include::modules/docker.asciidoc[]
-include::modules/haproxy.asciidoc[]
-include::modules/mongodb.asciidoc[]
-include::modules/mysql.asciidoc[]
-include::modules/nginx.asciidoc[]
-include::modules/postgresql.asciidoc[]
-include::modules/redis.asciidoc[]
-include::modules/system.asciidoc[]
-include::modules/zookeeper.asciidoc[]
+include::modules_list.asciidoc[]

6 vendor/github.com/elastic/beats/metricbeat/docs/modules/docker.asciidoc generated vendored
@@ -3,18 +3,18 @@ This file is generated! See scripts/docs_collector.py
 ////
 
 [[metricbeat-module-docker]]
-== docker Module
+== Docker Module
 
 experimental[]
 
-This is the docker Module.
+This module fetches metrics from https://www.docker.com/[Docker] containers.
 
 [float]
 === Example Configuration
 
-The docker module supports the standard configuration options that are described
+The Docker module supports the standard configuration options that are described
 in <<configuration-metricbeat>>. Here is an example configuration:
 
 [source,yaml]

30 vendor/github.com/elastic/beats/metricbeat/docs/modules_list.asciidoc generated vendored (new file)
@@ -0,0 +1,30 @@
+////
+This file is generated! See scripts/docs_collector.py
+////
+
+* <<metricbeat-module-apache,Apache>>
+* <<metricbeat-module-docker,Docker>>
+* <<metricbeat-module-haproxy,haproxy>>
+* <<metricbeat-module-kafka,kafka>>
+* <<metricbeat-module-mongodb,MongoDB>>
+* <<metricbeat-module-mysql,MySQL>>
+* <<metricbeat-module-nginx,Nginx>>
+* <<metricbeat-module-postgresql,PostgreSQL>>
+* <<metricbeat-module-redis,Redis>>
+* <<metricbeat-module-system,System>>
+* <<metricbeat-module-zookeeper,ZooKeeper>>
+
+
+--
+
+include::modules/apache.asciidoc[]
+include::modules/docker.asciidoc[]
+include::modules/haproxy.asciidoc[]
+include::modules/kafka.asciidoc[]
+include::modules/mongodb.asciidoc[]
+include::modules/mysql.asciidoc[]
+include::modules/nginx.asciidoc[]
+include::modules/postgresql.asciidoc[]
+include::modules/redis.asciidoc[]
+include::modules/system.asciidoc[]
+include::modules/zookeeper.asciidoc[]

2 vendor/github.com/elastic/beats/metricbeat/docs/reference/configuration.asciidoc generated vendored
@@ -10,7 +10,6 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes
 
 * <<configuration-metricbeat>>
 * <<configuration-general>>
-* <<configuration-processors>>
 * <<elasticsearch-output>>
 * <<logstash-output>>
 * <<kafka-output>>
@@ -20,5 +19,6 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes
 * <<configuration-output-ssl>>
 * <<configuration-path>>
 * <<configuration-logging>>
+* <<configuration-processors>>
 
 include::configuration/metricbeat-options.asciidoc[]

@@ -62,15 +62,17 @@ A list of tags that will be sent with the metricset event. This setting is optional.
 
 ===== filters
 
-A list of filters to apply to the data generated by the module. For more detail on how to configure
-filters, see <<configuration-processors>>.
+deprecated[5.1,This option will be renamed and changed in a future release]
+
+A list of filters to apply to the data generated by the module.
 
 include::../../../../libbeat/docs/generalconfig.asciidoc[]
 
-include::../../../../libbeat/docs/processors-config.asciidoc[]
-
 include::../../../../libbeat/docs/outputconfig.asciidoc[]
 
 include::../../../../libbeat/docs/shared-path-config.asciidoc[]
 
 include::../../../../libbeat/docs/loggingconfig.asciidoc[]
+
+include::../../../../libbeat/docs/processors-config.asciidoc[]

1 vendor/github.com/elastic/beats/metricbeat/docs/version.asciidoc generated vendored
@@ -1,2 +1,3 @@
 :stack-version: 5.1.1
 :doc-branch: 5.1
+:go-version: 1.7.1

2 vendor/github.com/elastic/beats/metricbeat/metricbeat.full.yml generated vendored
@@ -67,7 +67,7 @@ metricbeat.modules:
   # Password of hosts. Empty by default
   #password: test123
 
-#------------------------------- docker Module -------------------------------
+#------------------------------- Docker Module -------------------------------
 #- module: docker
   #metricsets: ["cpu", "info", "memory", "network", "diskio", "container"]
   #hosts: ["unix:///var/run/docker.sock"]

2 vendor/github.com/elastic/beats/metricbeat/metricbeat.template-es2x.json generated vendored
@@ -7,7 +7,7 @@
     }
   },
   "_meta": {
-    "version": "5.1.1"
+    "version": "5.1.2"
   },
   "dynamic_templates": [
     {

2 vendor/github.com/elastic/beats/metricbeat/metricbeat.template.json generated vendored
@@ -5,7 +5,7 @@
     "norms": false
   },
   "_meta": {
-    "version": "5.1.1"
+    "version": "5.1.2"
   },
   "dynamic_templates": [
     {

4 vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/docs.asciidoc generated vendored
@@ -1,6 +1,6 @@
-== docker Module
+== Docker Module
 
 experimental[]
 
-This is the docker Module.
+This module fetches metrics from https://www.docker.com/[Docker] containers.

4 vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/fields.yml generated vendored
@@ -1,5 +1,5 @@
 - key: docker
-  title: "docker"
+  title: "Docker"
   description: >
     experimental[]
 
@@ -9,5 +9,5 @@
 - name: docker
   type: group
   description: >
-    docker contains different informations and statistics of docker's containers running
+    Information and statistics about docker's running containers.
   fields:

@@ -1,3 +1,4 @@
-=== docker container MetricSet
+=== Docker Container Metricset
 
-This is the container metricset of the module docker.
+The Docker `container` metricset collects information and statistics about
+running Docker containers.

8 vendor/github.com/elastic/beats/metricbeat/module/docker/container/_meta/fields.yml generated vendored
@@ -1,16 +1,16 @@
 - name: container
   type: group
   description: >
-    Docker container metrics
+    Docker container metrics.
   fields:
     - name: command
       type: keyword
       description: >
-        Executed command in docker container.
+        Command that was executed in the Docker container.
     - name: created
       type: date
       description: >
-        Date then the container was created.
+        Date when the container was created.
     - name: id
       type: keyword
       description: >
@@ -39,4 +39,4 @@
     - name: rw
       type: long
       description: >
-        Size of the files which have been created or changed since creation.
+        Size of the files that have been created or changed since creation.

4 vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/docs.asciidoc generated vendored
@@ -1,3 +1,3 @@
-=== docker cpu MetricSet
+=== Docker CPU Metricset
 
-This is the cpu metricset of the module docker.
+The Docker `cpu` metricset collects runtime CPU metrics.

12 vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/fields.yml generated vendored
@@ -1,17 +1,17 @@
 - name: cpu
   type: group
   description: >
-    Runtime cpu metrics.
+    Runtime CPU metrics.
   fields:
     - name: kernel.pct
       type: scaled_float
       format: percentage
       description: >
-        The system kernel consumed by The Docker server.
+        The system kernel consumed by the Docker server.
     - name: kernel.ticks
       type: long
       description: >
-        Cpu kernel tikcs
+        CPU kernel ticks.
     - name: system.pct
       type: scaled_float
       format: percentage
@@ -19,7 +19,7 @@
     - name: system.ticks
       type: long
       description: >
-        Cpu system tikcs
+        CPU system ticks.
     - name: user.pct
       type: scaled_float
       format: percentage
@@ -27,12 +27,12 @@
     - name: user.ticks
       type: long
       description: >
-        Cpu user tikcs
+        CPU user ticks
     - name: total.pct
       type: scaled_float
       format: percentage
       description: >
-        Total cpu usage.
+        Total CPU usage.
   # TODO: how to document cpu list?
   #- name: core
   #  type: list

4 vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/docs.asciidoc generated vendored
@@ -1,3 +1,3 @@
-=== docker diskio MetricSet
+=== Docker Diskio Metricset
 
-This is the diskio metricset of the module docker.
+The Docker `diskio` metricset collects disk I/O metrics.

4 vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/fields.yml generated vendored
@@ -1,7 +1,7 @@
 - name: diskio
   type: group
   description: >
-    Diskio metrics.
+    Disk I/O metrics.
   fields:
     - name: reads
       type: scaled_float
@@ -14,4 +14,4 @@
     - name: total
       type: scaled_float
       description: >
-        Reads and writes numbers combined.
+        Number of reads and writes combined.

vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go
generated
vendored
26
vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go
generated
vendored
@ -64,17 +64,35 @@ func FetchStats(client *docker.Client) ([]Stat, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
containersList := []Stat{}
|
containersList := []Stat{}
|
||||||
|
queue := make(chan Stat, 1)
|
||||||
|
wg.Add(len(containers))
|
||||||
|
|
||||||
for _, container := range containers {
|
for _, container := range containers {
|
||||||
// This is currently very inefficient as docker calculates the average for each request,
|
go func(container docker.APIContainers) {
|
||||||
// means each request will take at least 2s: https://github.com/docker/docker/blob/master/cli/command/container/stats_helpers.go#L148
|
queue <- exportContainerStats(client, &container)
|
||||||
// Getting all stats at once is implemented here: https://github.com/docker/docker/pull/25361
|
}(container)
|
||||||
containersList = append(containersList, exportContainerStats(client, &container))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
for container := range queue {
|
||||||
|
containersList = append(containersList, container)
|
||||||
|
wg.Done()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
return containersList, err
|
return containersList, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// exportContainerStats loads stats for the given container
|
||||||
|
//
|
||||||
|
// This is currently very inefficient as docker calculates the average for each request,
|
||||||
|
// means each request will take at least 2s: https://github.com/docker/docker/blob/master/cli/command/container/stats_helpers.go#L148
|
||||||
|
// Getting all stats at once is implemented here: https://github.com/docker/docker/pull/25361
|
||||||
func exportContainerStats(client *docker.Client, container *docker.APIContainers) Stat {
|
func exportContainerStats(client *docker.Client, container *docker.APIContainers) Stat {
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
var event Stat
|
var event Stat
|
||||||
|
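The change above fans one goroutine out per container and funnels the results back through a channel, with a WaitGroup guaranteeing the single collector has appended every result before the slice is returned. Here is a self-contained sketch of the same fan-out/collect pattern, with a hypothetical `fetch` function standing in for `exportContainerStats`:

[source,go]
----
package main

import (
	"fmt"
	"sync"
)

// fetch is a stand-in for a slow per-item call such as exportContainerStats.
func fetch(id int) string {
	return fmt.Sprintf("stats-%d", id)
}

func main() {
	ids := []int{1, 2, 3, 4}

	var wg sync.WaitGroup
	wg.Add(len(ids))

	results := []string{}
	queue := make(chan string, 1)

	// Fan out: one goroutine per item, each publishing its result.
	for _, id := range ids {
		go func(id int) {
			queue <- fetch(id)
		}(id)
	}

	// Single collector goroutine appends results; wg.Done is only called
	// after the append, so wg.Wait guarantees the slice is complete.
	go func() {
		for r := range queue {
			results = append(results, r)
			wg.Done()
		}
	}()

	wg.Wait()
	fmt.Println(results)
}
----
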
5 vendor/github.com/elastic/beats/metricbeat/module/docker/info/_meta/docs.asciidoc generated vendored
@@ -1,3 +1,4 @@
-=== docker info MetricSet
+=== Docker Info Metricset
 
-This is the info metricset of the module docker.
+The Docker `info` metricset collects system-wide information based on the
+https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/display-system-wide-information[Docker Remote API].

4 vendor/github.com/elastic/beats/metricbeat/module/docker/info/_meta/fields.yml generated vendored
@@ -3,7 +3,7 @@
   description: >
     experimental[]
 
-    info metrics based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/display-system-wide-information
+    Info metrics based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/display-system-wide-information.
   fields:
     - name: containers
       type: group
@@ -29,7 +29,7 @@
     - name: id
       type: keyword
       description: >
-        Unique docker host identifier.
+        Unique Docker host identifier.
 
     - name: images
       type: long

4 vendor/github.com/elastic/beats/metricbeat/module/docker/memory/_meta/docs.asciidoc generated vendored
@@ -1,3 +1,3 @@
-=== docker memory MetricSet
+=== Docker Memory Metricset
 
-This is the memory metricset of the module docker.
+The Docker `memory` metricset collects memory metrics.

2 vendor/github.com/elastic/beats/metricbeat/module/docker/memory/_meta/fields.yml generated vendored
@@ -16,7 +16,7 @@
     - name: rss
       type: group
       description: >
-        Rss memory stats.
+        RSS memory stats.
       fields:
         - name: total
           type: long

4 vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/docs.asciidoc generated vendored
@@ -1,3 +1,3 @@
-=== docker network MetricSet
+=== Docker Network Metricset
 
-This is the network metricset of the module docker.
+The Docker `network` metricset collects network metrics.

2 vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/fields.yml generated vendored
@@ -1,7 +1,7 @@
 - name: network
   type: group
   description: >
-    Netowrk metrics.
+    Network metrics.
   fields:
 
     - name: interface

vendor/github.com/elastic/beats/metricbeat/scripts/docs_collector.py
generated
vendored
17
vendor/github.com/elastic/beats/metricbeat/scripts/docs_collector.py
generated
vendored
@ -16,6 +16,8 @@ This file is generated! See scripts/docs_collector.py
|
|||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
modules_list = {}
|
||||||
|
|
||||||
# Iterate over all modules
|
# Iterate over all modules
|
||||||
for module in sorted(os.listdir(base_dir)):
|
for module in sorted(os.listdir(base_dir)):
|
||||||
|
|
||||||
@ -41,6 +43,8 @@ This file is generated! See scripts/docs_collector.py
|
|||||||
fields = yaml.load(f.read())
|
fields = yaml.load(f.read())
|
||||||
title = fields[0]["title"]
|
title = fields[0]["title"]
|
||||||
|
|
||||||
|
modules_list[module] = title
|
||||||
|
|
||||||
config_file = beat_path + "/config.yml"
|
config_file = beat_path + "/config.yml"
|
||||||
|
|
||||||
# Add example config file
|
# Add example config file
|
||||||
@ -131,6 +135,19 @@ For a description of each field in the metricset, see the
|
|||||||
with open(os.path.abspath("docs") + "/modules/" + module + ".asciidoc", 'w') as f:
|
with open(os.path.abspath("docs") + "/modules/" + module + ".asciidoc", 'w') as f:
|
||||||
f.write(module_file)
|
f.write(module_file)
|
||||||
|
|
||||||
|
module_list_output = generated_note
|
||||||
|
for m, title in sorted(modules_list.iteritems()):
|
||||||
|
module_list_output += " * <<metricbeat-module-" + m + "," + title + ">>\n"
|
||||||
|
|
||||||
|
module_list_output += "\n\n--\n\n"
|
||||||
|
for m, title in sorted(modules_list.iteritems()):
|
||||||
|
module_list_output += "include::modules/"+ m + ".asciidoc[]\n"
|
||||||
|
|
||||||
|
# Write module link list
|
||||||
|
with open(os.path.abspath("docs") + "/modules_list.asciidoc", 'w') as f:
|
||||||
|
f.write(module_list_output)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
parser = argparse.ArgumentParser(
|
parser = argparse.ArgumentParser(
|
||||||
description="Collects modules docs")
|
description="Collects modules docs")
|
||||||
|
@@ -4,7 +4,7 @@
   "description": "",
   "title": "Packetbeat Cassandra",
   "uiStateJSON": "{\"P-10\":{\"vis\":{\"legendOpen\":false}},\"P-17\":{\"vis\":{\"legendOpen\":false}},\"P-18\":{\"vis\":{\"legendOpen\":false}}}",
-  "panelsJSON": "[{\"col\":10,\"id\":\"Cassandra:-ResponseKeyspace\",\"panelIndex\":3,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Cassandra:-ResponseType\",\"panelIndex\":4,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra:-ResponseTime\",\"panelIndex\":9,\"row\":5,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Cassandra:-RequestCount\",\"panelIndex\":10,\"row\":1,\"size_x\":9,\"size_y\":2,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Cassandra:-Ops\",\"panelIndex\":11,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra:-RequestCountStackByType\",\"panelIndex\":15,\"row\":7,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra:-ResponseCountStackByType\",\"panelIndex\":16,\"row\":9,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra:-RequestCountByType\",\"panelIndex\":17,\"row\":11,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Cassandra:-ResponseCountByType\",\"panelIndex\":18,\"row\":11,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Navigation\",\"panelIndex\":19,\"row\":1,\"size_x\":3,\"size_y\":4,\"type\":\"visualization\"},{\"id\":\"Cassandra:-QueryView\",\"type\":\"search\",\"panelIndex\":20,\"size_x\":12,\"size_y\":3,\"col\":1,\"row\":14,\"columns\":[\"cassandra.request.query\",\"cassandra.response.result.rows.meta.keyspace\",\"cassandra.response.result.rows.meta.table\",\"cassandra.response.result.rows.num_rows\"],\"sort\":[\"@timestamp\",\"desc\"]}]",
+  "panelsJSON": "[{\"col\":10,\"id\":\"Cassandra-ResponseKeyspace\",\"panelIndex\":3,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Cassandra-ResponseType\",\"panelIndex\":4,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-ResponseTime\",\"panelIndex\":9,\"row\":5,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Cassandra-RequestCount\",\"panelIndex\":10,\"row\":1,\"size_x\":9,\"size_y\":2,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Cassandra-Ops\",\"panelIndex\":11,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-RequestCountStackByType\",\"panelIndex\":15,\"row\":7,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-ResponseCountStackByType\",\"panelIndex\":16,\"row\":9,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-RequestCountByType\",\"panelIndex\":17,\"row\":11,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Cassandra-ResponseCountByType\",\"panelIndex\":18,\"row\":11,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Navigation\",\"panelIndex\":19,\"row\":1,\"size_x\":3,\"size_y\":4,\"type\":\"visualization\"},{\"id\":\"Cassandra-QueryView\",\"type\":\"search\",\"panelIndex\":20,\"size_x\":12,\"size_y\":3,\"col\":1,\"row\":14,\"columns\":[\"cassandra.request.query\",\"cassandra.response.result.rows.meta.keyspace\",\"cassandra.response.result.rows.meta.table\",\"cassandra.response.result.rows.num_rows\"],\"sort\":[\"@timestamp\",\"desc\"]}]",
   "optionsJSON": "{\"darkTheme\":false}",
   "version": 1,
   "kibanaSavedObjectMeta": {

2 vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-filtering.asciidoc generated vendored
@@ -8,7 +8,7 @@ requests and their response codes are reported:
 
 [source, yaml]
 -----------------------------------------------------
-filters:
+processors:
  - include_fields:
      fields:
        - bytes_in

2 vendor/github.com/elastic/beats/packetbeat/docs/reference/configuration.asciidoc generated vendored
@@ -13,7 +13,6 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes
 * <<configuration-protocols>>
 * <<configuration-processes>>
 * <<configuration-general>>
-* <<configuration-processors>>
 * <<elasticsearch-output>>
 * <<logstash-output>>
 * <<kafka-output>>
@@ -24,6 +23,7 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes
 * <<configuration-path>>
 * <<configuration-logging>>
 * <<configuration-run-options>>
+* <<configuration-processors>>
 
 NOTE: Packetbeat maintains a real-time topology map of all the servers in your network.
 See <<maintaining-topology>> for more details.
@@ -794,8 +794,6 @@ process' command line as read from `/proc/<pid>/cmdline`.
 
 include::../../../../libbeat/docs/generalconfig.asciidoc[]
 
-include::../../../../libbeat/docs/processors-config.asciidoc[]
-
 include::../../../../libbeat/docs/outputconfig.asciidoc[]
 
 include::../../../../libbeat/docs/shared-path-config.asciidoc[]
@@ -804,3 +802,6 @@ include::../../../../libbeat/docs/loggingconfig.asciidoc[]
 
 include::./runconfig.asciidoc[]
 
+include::../../../../libbeat/docs/processors-config.asciidoc[]

1 vendor/github.com/elastic/beats/packetbeat/docs/version.asciidoc generated vendored
@@ -1,2 +1,3 @@
 :stack-version: 5.1.1
 :doc-branch: 5.1
+:go-version: 1.7.1

2 vendor/github.com/elastic/beats/packetbeat/packetbeat.template-es2x.json generated vendored
@@ -7,7 +7,7 @@
     }
   },
   "_meta": {
-    "version": "5.1.1"
+    "version": "5.1.2"
   },
   "dynamic_templates": [
     {

2 vendor/github.com/elastic/beats/packetbeat/packetbeat.template.json generated vendored
@@ -5,7 +5,7 @@
     "norms": false
   },
   "_meta": {
-    "version": "5.1.1"
+    "version": "5.1.2"
   },
   "dynamic_templates": [
     {

4 vendor/github.com/elastic/beats/testing/environments/base.yml generated vendored
@@ -8,17 +8,14 @@ services:
       - logstash
     environment:
       - LS_HOST=logstash
-    container_name: beat
 
   elasticsearch:
     image: elasticsearch:latest
-    container_name: elasticsearch
 
   logstash:
     image: logstash:latest
     links:
       - elasticsearch
-    container_name: logstash
     environment:
       - ES_HOST=elasticsearch
 
@@ -28,4 +25,3 @@ services:
       - elasticsearch
     environment:
       - ELASTICSEARCH_URL=http://elasticsearch:9200/
-    container_name: kibana

@@ -2,7 +2,7 @@
 FROM docker.elastic.co/kibana/kibana-ubuntu-base:latest
 MAINTAINER Elastic Docker Team <docker@elastic.co>
 
-ARG KIBANA_DOWNLOAD_URL=https://staging.elastic.co/5.1.1-acecfcb6/downloads/kibana/kibana-5.1.0-linux-x86_64.tar.gz
+ARG KIBANA_DOWNLOAD_URL=https://staging.elastic.co/5.1.2-429c1ec3/downloads/kibana/kibana-5.1.2-linux-x86_64.tar.gz
 ARG X_PACK_URL
 
 EXPOSE 5601

@@ -2,8 +2,8 @@ FROM java:8-jre
 
 ENV LS_VERSION 5
 
-ENV VERSION 5.1.1
-ENV URL https://staging.elastic.co/5.1.1-acecfcb6/downloads/logstash/logstash-${VERSION}.tar.gz
+ENV VERSION 5.1.2
+ENV URL https://staging.elastic.co/5.1.2-429c1ec3/downloads/logstash/logstash-5.1.2.tar.gz
 ENV PATH $PATH:/opt/logstash-$VERSION/bin
 
 # Cache variable can be set during building to invalidate the build cache with `--build-arg CACHE=$(date +%s) .`

4 vendor/github.com/elastic/beats/testing/environments/snapshot.yml generated vendored
@@ -7,8 +7,8 @@ services:
       context: ./docker/elasticsearch
       dockerfile: Dockerfile-snapshot
       args:
-        ELASTIC_VERSION: 5.1.1
-        ES_DOWNLOAD_URL: 'https://staging.elastic.co/5.1.1-acecfcb6/downloads/elasticsearch'
+        ELASTIC_VERSION: 5.1.2
+        ES_DOWNLOAD_URL: 'https://staging.elastic.co/5.1.2-429c1ec3/downloads/elasticsearch'
         #XPACK: http://snapshots.elastic.co/downloads/packs/x-pack/x-pack-6.0.0-alpha1-SNAPSHOT.zip
     environment:
       - "ES_JAVA_OPTS=-Xms512m -Xmx512m"

2 vendor/github.com/elastic/beats/winlogbeat/docs/reference/configuration.asciidoc generated vendored
@@ -11,7 +11,6 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes
 
 * <<configuration-winlogbeat-options>>
 * <<configuration-general>>
-* <<configuration-processors>>
 * <<elasticsearch-output>>
 * <<logstash-output>>
 * <<kafka-output>>
@@ -21,6 +20,7 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes
 * <<configuration-output-ssl>>
 * <<configuration-path>>
 * <<configuration-logging>>
+* <<configuration-processors>>
 
 include::configuration/winlogbeat-options.asciidoc[]

@@ -163,6 +163,38 @@ winlogbeat.event_logs:
     event_id: 4624, 4625, 4700-4800, -4735
 --------------------------------------------------------------------------------
 
+[WARNING]
+=======================================
+If you specify more than 22 event IDs to include or 22 event IDs to exclude,
+Windows will prevent Winlogbeat from reading the event log because it limits the
+number of conditions that can be used in an event log query. If this occurs, a
+warning similar to the one shown below will be logged by Winlogbeat, and it will
+continue processing data from other event logs. For more information, see
+https://support.microsoft.com/en-us/kb/970453.
+
+`WARN EventLog[Application] Open() error. No events will be read from this
+source. The specified query is invalid.`
+
+If you have more than 22 event IDs, you can work around this Windows limitation
+by using a <<drop-event,drop_event>> processor to do the filtering after
+Winlogbeat has received the events from Windows. The filter shown below is
+equivalent to `event_id: 903, 1024, 4624` but can be expanded beyond 22
+event IDs.
+
+[source,yaml]
+--------------------------------------------------------------------------------
+processors:
+  - drop_event.when.and:
+    - equals.log_name: Security
+    - not.or:
+      - equals.event_id: 903
+      - equals.event_id: 1024
+      - equals.event_id: 4624
+--------------------------------------------------------------------------------
+=======================================
 
 ===== event_logs.level
 
 A list of event levels to include. The value is a comma-separated list of
@@ -319,10 +351,11 @@ The metrics are served as a JSON document. The metrics include:
 
 include::../../../../libbeat/docs/generalconfig.asciidoc[]
 
-include::../../../../libbeat/docs/processors-config.asciidoc[]
-
 include::../../../../libbeat/docs/outputconfig.asciidoc[]
 
 include::../../../../libbeat/docs/shared-path-config.asciidoc[]
 
 include::../../../../libbeat/docs/loggingconfig.asciidoc[]
+
+include::../../../../libbeat/docs/processors-config.asciidoc[]

1 vendor/github.com/elastic/beats/winlogbeat/docs/version.asciidoc generated vendored
@@ -1,2 +1,3 @@
 :stack-version: 5.1.1
 :doc-branch: 5.1
+:go-version: 1.7.1

137
vendor/github.com/elastic/beats/winlogbeat/eventlog/bench_test.go
generated
vendored
Normal file
137
vendor/github.com/elastic/beats/winlogbeat/eventlog/bench_test.go
generated
vendored
Normal file
@ -0,0 +1,137 @@
|
|||||||
|
// +build windows
|
||||||
|
|
||||||
|
package eventlog
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"math/rand"
|
||||||
|
"os/exec"
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
elog "github.com/andrewkroh/sys/windows/svc/eventlog"
|
||||||
|
"github.com/dustin/go-humanize"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Benchmark tests with customized output. (`go test -v -benchtime 10s -benchtest .`)
|
||||||
|
|
||||||
|
var (
|
||||||
|
benchTest = flag.Bool("benchtest", false, "Run benchmarks for the eventlog package")
|
||||||
|
+	injectAmount = flag.Int("inject", 50000, "Number of events to inject before running benchmarks")
+)
+
+// TestBenchmarkBatchReadSize tests the performance of different
+// batch_read_size values.
+func TestBenchmarkBatchReadSize(t *testing.T) {
+	if !*benchTest {
+		t.Skip("-benchtest not enabled")
+	}
+
+	log, err := initLog(providerName, sourceName, eventCreateMsgFile)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		err := uninstallLog(providerName, sourceName, log)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	// Increase the log size so that it can hold these large events.
+	output, err := exec.Command("wevtutil.exe", "sl", "/ms:1073741824", providerName).CombinedOutput()
+	if err != nil {
+		t.Fatal(err, string(output))
+	}
+
+	// Publish test messages:
+	for i := 0; i < *injectAmount; i++ {
+		err = log.Report(elog.Info, uint32(rng.Int63()%1000), []string{strconv.Itoa(i) + " " + randString(256)})
+		if err != nil {
+			t.Fatal("ReportEvent error", err)
+		}
+	}
+
+	setup := func(t testing.TB, batchReadSize int) (EventLog, func()) {
+		eventlog, err := newWinEventLog(map[string]interface{}{"name": providerName, "batch_read_size": batchReadSize})
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = eventlog.Open(0)
+		if err != nil {
+			t.Fatal(err)
+		}
+		return eventlog, func() {
+			err := eventlog.Close()
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+	}
+
+	benchTest := func(batchSize int) {
+		var err error
+		result := testing.Benchmark(func(b *testing.B) {
+			eventlog, tearDown := setup(b, batchSize)
+			defer tearDown()
+			b.ResetTimer()
+
+			// Each iteration reads one batch.
+			for i := 0; i < b.N; i++ {
+				_, err = eventlog.Read()
+				if err != nil {
+					return
+				}
+			}
+		})
+
+		if err != nil {
+			t.Fatal(err)
+			return
+		}
+
+		t.Logf("batch_size=%v, total_events=%v, batch_time=%v, events_per_sec=%v, bytes_alloced_per_event=%v, total_allocs=%v",
+			batchSize,
+			result.N*batchSize,
+			time.Duration(result.NsPerOp()),
+			float64(batchSize)/time.Duration(result.NsPerOp()).Seconds(),
+			humanize.Bytes(result.MemBytes/(uint64(result.N)*uint64(batchSize))),
+			result.MemAllocs)
+	}
+
+	benchTest(10)
+	benchTest(100)
+	benchTest(500)
+	benchTest(1000)
+}
+
+// Utility Functions
+
+var rng = rand.NewSource(time.Now().UnixNano())
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+const (
+	letterIdxBits = 6                    // 6 bits to represent a letter index
+	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
+)
+
+// https://stackoverflow.com/questions/22892120/how-to-generate-a-random-string-of-a-fixed-length-in-golang
+func randString(n int) string {
+	b := make([]byte, n)
+	// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
+	for i, cache, remain := n-1, rng.Int63(), letterIdxMax; i >= 0; {
+		if remain == 0 {
+			cache, remain = rng.Int63(), letterIdxMax
+		}
+		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
+			b[i] = letterBytes[idx]
+			i--
+		}
+		cache >>= letterIdxBits
+		remain--
+	}
+
+	return string(b)
+}
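Editor's note: the t.Logf call above condenses several derived figures from a testing.BenchmarkResult. The following standalone sketch (not part of the commit; the Sleep is a stand-in for one eventlog.Read() batch, and batchSize is an assumed value) shows how those numbers fall out:

package main

import (
	"fmt"
	"testing"
	"time"
)

func main() {
	// testing.Benchmark grows b.N until the timing stabilizes, then
	// returns the totals in a BenchmarkResult.
	result := testing.Benchmark(func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			time.Sleep(time.Millisecond) // stand-in for one Read() batch
		}
	})

	const batchSize = 100 // pretend each iteration read a batch of 100 events

	// NsPerOp() is the mean time per batch; dividing the batch size by that
	// duration (in seconds) yields the events_per_sec value the test logs.
	perBatch := time.Duration(result.NsPerOp())
	fmt.Printf("batch_time=%v events_per_sec=%.0f\n",
		perBatch, float64(batchSize)/perBatch.Seconds())
}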
22 vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlog.go generated vendored
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"reflect"
 	"strconv"
+	"syscall"
 
 	"github.com/elastic/beats/libbeat/common"
 	"github.com/elastic/beats/libbeat/logp"
@@ -23,9 +24,14 @@ var (
 	detailf = logp.MakeDebug(detailSelector)
 )
 
+var (
 	// dropReasons contains counters for the number of dropped events for each
 	// reason.
-var dropReasons = expvar.NewMap("drop_reasons")
+	dropReasons = expvar.NewMap("drop_reasons")
+
+	// readErrors contains counters for the read error types that occur.
+	readErrors = expvar.NewMap("read_errors")
+)
 
 // EventLog is an interface to a Windows Event Log.
 type EventLog interface {
@@ -177,3 +183,17 @@ func isZero(i interface{}) bool {
 	}
 	return false
 }
+
+// incrementMetric increments a value in the specified expvar.Map. The key
+// should be a windows syscall.Errno or a string. Any other types will be
+// reported under the "other" key.
+func incrementMetric(v *expvar.Map, key interface{}) {
+	switch t := key.(type) {
+	default:
+		v.Add("other", 1)
+	case string:
+		v.Add(t, 1)
+	case syscall.Errno:
+		v.Add(strconv.Itoa(int(t)), 1)
+	}
+}
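Editor's note: a minimal standalone sketch of how the new incrementMetric helper buckets its keys; the map name "read_errors_demo" is invented for the demo so it cannot clash with the real counters:

package main

import (
	"expvar"
	"fmt"
	"strconv"
	"syscall"
)

// Same logic as the helper added above: Errno keys become their numeric
// code, strings are used verbatim, and anything else lands under "other".
func incrementMetric(v *expvar.Map, key interface{}) {
	switch t := key.(type) {
	default:
		v.Add("other", 1)
	case string:
		v.Add(t, 1)
	case syscall.Errno:
		v.Add(strconv.Itoa(int(t)), 1)
	}
}

func main() {
	m := expvar.NewMap("read_errors_demo")
	incrementMetric(m, syscall.Errno(1734)) // counted as "1734"
	incrementMetric(m, "unmarshal")         // counted as "unmarshal"
	incrementMetric(m, 42)                  // counted as "other"
	fmt.Println(m.String())
}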
1 vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging.go generated vendored
@@ -195,6 +195,7 @@ func (l *eventLogging) Close() error {
 // by attempting to correct the error through closing and reopening the event
 // log.
 func (l *eventLogging) readRetryErrorHandler(err error) error {
+	incrementMetric(readErrors, err)
 	if errno, ok := err.(syscall.Errno); ok {
 		var reopen bool
 
14 vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging_test.go generated vendored
@@ -4,6 +4,8 @@ package eventlog
 
 import (
 	"fmt"
+	"os/exec"
+	"strconv"
 	"strings"
 	"sync"
 	"testing"
@@ -35,6 +37,8 @@ const (
 
 const allLevels = elog.Success | elog.AuditFailure | elog.AuditSuccess | elog.Error | elog.Info | elog.Warning
 
+const gigabyte = 1 << 30
+
 // Test messages.
 var messages = map[uint32]struct {
 	eventType uint16
@@ -72,7 +76,7 @@ var oneTimeLogpInit sync.Once
 func configureLogp() {
 	oneTimeLogpInit.Do(func() {
 		if testing.Verbose() {
-			logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"eventlog", "eventlog_detail"})
+			logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"eventlog"})
 			logp.Info("DEBUG enabled for eventlog.")
 		} else {
 			logp.LogInit(logp.LOG_WARNING, "", false, true, []string{})
@@ -143,6 +147,14 @@ func uninstallLog(provider, source string, log *elog.Log) error {
 	return errs.Err()
 }
 
+// setLogSize set the maximum number of bytes that an event log can hold.
+func setLogSize(t testing.TB, provider string, sizeBytes int) {
+	output, err := exec.Command("wevtutil.exe", "sl", "/ms:"+strconv.Itoa(sizeBytes), providerName).CombinedOutput()
+	if err != nil {
+		t.Fatal("failed to set log size", err, string(output))
+	}
+}
+
 // Verify that all messages are read from the event log.
 func TestRead(t *testing.T) {
 
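Editor's note: the new gigabyte constant is 1 << 30 = 1073741824, the same value hard-coded in the benchmark test earlier in this commit. A trivial sketch of the command line the setLogSize helper ends up running (the channel name is a placeholder):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	const gigabyte = 1 << 30
	// setLogSize shells out to wevtutil's set-log ("sl") command with a
	// /ms (maximum size in bytes) switch; for one gigabyte that is:
	fmt.Println("wevtutil.exe sl /ms:" + strconv.Itoa(gigabyte) + " <channel>")
}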
61 vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog.go generated vendored
@@ -4,7 +4,6 @@ package eventlog
 
 import (
 	"fmt"
-	"strconv"
 	"syscall"
 	"time"
 
@@ -13,6 +12,7 @@ import (
 	"github.com/elastic/beats/winlogbeat/sys"
 	win "github.com/elastic/beats/winlogbeat/sys/wineventlog"
 	"github.com/joeshaw/multierror"
+	"github.com/pkg/errors"
 	"golang.org/x/sys/windows"
 )
 
@@ -73,6 +73,7 @@ type winEventLog struct {
 	channelName  string        // Name of the channel from which to read.
 	subscription win.EvtHandle // Handle to the subscription.
 	maxRead      int           // Maximum number returned in one Read.
+	lastRead     uint64        // Record number of the last read event.
 
 	render    func(event win.EvtHandle) (string, error) // Function for rendering the event to XML.
 	renderBuf []byte                                    // Buffer used for rendering event.
@@ -118,13 +119,8 @@ func (l *winEventLog) Open(recordNumber uint64) error {
 }
 
 func (l *winEventLog) Read() ([]Record, error) {
-	handles, err := win.EventHandles(l.subscription, l.maxRead)
-	if err == win.ERROR_NO_MORE_ITEMS {
-		detailf("%s No more events", l.logPrefix)
-		return nil, nil
-	}
-	if err != nil {
-		logp.Warn("%s EventHandles returned error %v", l.logPrefix, err)
+	handles, _, err := l.eventHandles(l.maxRead)
+	if err != nil || len(handles) == 0 {
 		return nil, err
 	}
 	defer func() {
@@ -145,17 +141,18 @@ func (l *winEventLog) Read() ([]Record, error) {
 		}
 		if err != nil && x == "" {
 			logp.Err("%s Dropping event with rendering error. %v", l.logPrefix, err)
-			reportDrop(err)
+			incrementMetric(dropReasons, err)
 			continue
 		}
 
 		r, err := l.buildRecordFromXML(x, err)
 		if err != nil {
 			logp.Err("%s Dropping event. %v", l.logPrefix, err)
-			reportDrop("unmarshal")
+			incrementMetric(dropReasons, err)
 			continue
 		}
 		records = append(records, r)
+		l.lastRead = r.RecordID
 	}
 
 	debugf("%s Read() is returning %d records", l.logPrefix, len(records))
@@ -167,6 +164,34 @@ func (l *winEventLog) Close() error {
 	return win.Close(l.subscription)
 }
 
+func (l *winEventLog) eventHandles(maxRead int) ([]win.EvtHandle, int, error) {
+	handles, err := win.EventHandles(l.subscription, maxRead)
+	switch err {
+	case nil:
+		if l.maxRead > maxRead {
+			debugf("%s Recovered from RPC_S_INVALID_BOUND error (errno 1734) "+
+				"by decreasing batch_read_size to %v", l.logPrefix, maxRead)
+		}
+		return handles, maxRead, nil
+	case win.ERROR_NO_MORE_ITEMS:
+		detailf("%s No more events", l.logPrefix)
+		return nil, maxRead, nil
+	case win.RPC_S_INVALID_BOUND:
+		incrementMetric(readErrors, err)
+		if err := l.Close(); err != nil {
+			return nil, 0, errors.Wrap(err, "failed to recover from RPC_S_INVALID_BOUND")
+		}
+		if err := l.Open(l.lastRead); err != nil {
+			return nil, 0, errors.Wrap(err, "failed to recover from RPC_S_INVALID_BOUND")
+		}
+		return l.eventHandles(maxRead / 2)
+	default:
+		incrementMetric(readErrors, err)
+		logp.Warn("%s EventHandles returned error %v", l.logPrefix, err)
+		return nil, 0, err
+	}
+}
+
 func (l *winEventLog) buildRecordFromXML(x string, recoveredErr error) (Record, error) {
 	e, err := sys.UnmarshalEventXML([]byte(x))
 	if err != nil {
@@ -204,20 +229,6 @@ func (l *winEventLog) buildRecordFromXML(x string, recoveredErr error) (Record,
 	return r, nil
 }
 
-// reportDrop reports a dropped event log record and the reason as an expvar
-// metric. The reason should be a windows syscall.Errno or a string. Any other
-// types will be reported under the "other" key.
-func reportDrop(reason interface{}) {
-	switch t := reason.(type) {
-	default:
-		dropReasons.Add("other", 1)
-	case string:
-		dropReasons.Add(t, 1)
-	case syscall.Errno:
-		dropReasons.Add(strconv.Itoa(int(t)), 1)
-	}
-}
-
 // newWinEventLog creates and returns a new EventLog for reading event logs
 // using the Windows Event Log.
 func newWinEventLog(options map[string]interface{}) (EventLog, error) {
@@ -283,7 +294,7 @@ func newWinEventLog(options map[string]interface{}) (EventLog, error) {
 }
 
 func init() {
-	// Register eventlogging API if it is available.
+	// Register wineventlog API if it is available.
 	available, _ := win.IsAvailable()
 	if available {
 		Register(winEventLogAPIName, 0, newWinEventLog, win.Channels)
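Editor's note: the heart of this file's change is eventHandles, which recovers from RPC_S_INVALID_BOUND by closing the subscription, reopening it at the last-read record, and retrying with half the batch size. Below is a standalone sketch of just the halving retry; errInvalidBound and readBatch are invented stand-ins for win.RPC_S_INVALID_BOUND and win.EventHandles, the reopen step is elided, and in the real code the maxHandles < 1 guard added to EventHandles (see the wineventlog_windows.go hunk further down) bounds the recursion:

package main

import (
	"errors"
	"fmt"
)

// Stand-in for win.RPC_S_INVALID_BOUND (errno 1734), returned when the
// requested batch is too large for the pending events.
var errInvalidBound = errors.New("RPC_S_INVALID_BOUND")

// Stand-in for win.EventHandles; pretend only batches of <= 256 fit.
func readBatch(size int) ([]int, error) {
	if size < 1 {
		return nil, errors.New("size must be greater than 0")
	}
	if size > 256 {
		return nil, errInvalidBound
	}
	return make([]int, size), nil
}

// eventHandles mirrors the recovery above: on RPC_S_INVALID_BOUND the real
// code closes and reopens the subscription at the last-read record, then
// retries with half the batch size until a batch fits.
func eventHandles(maxRead int) ([]int, int, error) {
	handles, err := readBatch(maxRead)
	switch err {
	case nil:
		return handles, maxRead, nil
	case errInvalidBound:
		return eventHandles(maxRead / 2)
	default:
		return nil, 0, err
	}
}

func main() {
	_, size, _ := eventHandles(1024)
	fmt.Println("recovered batch size:", size) // 1024 -> 512 -> 256
}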
69 vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog_test.go generated vendored
@@ -3,8 +3,11 @@
 package eventlog
 
 import (
+	"expvar"
+	"strconv"
 	"testing"
 
+	elog "github.com/andrewkroh/sys/windows/svc/eventlog"
 	"github.com/stretchr/testify/assert"
 )
 
@@ -52,3 +55,69 @@ func TestWinEventLogBatchReadSize(t *testing.T) {
 
 	assert.Len(t, records, batchReadSize)
 }
+
+// TestReadLargeBatchSize tests reading from an event log using a large
+// read_batch_size parameter. When combined with large messages this causes
+// EvtNext (wineventlog.EventRecords) to fail with RPC_S_INVALID_BOUND error.
+func TestReadLargeBatchSize(t *testing.T) {
+	configureLogp()
+	log, err := initLog(providerName, sourceName, eventCreateMsgFile)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		err := uninstallLog(providerName, sourceName, log)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	setLogSize(t, providerName, gigabyte)
+
+	// Publish large test messages.
+	totalEvents := 1000
+	for i := 0; i < totalEvents; i++ {
+		err = log.Report(elog.Info, uint32(i%1000), []string{strconv.Itoa(i) + " " + randString(31800)})
+		if err != nil {
+			t.Fatal("ReportEvent error", err)
+		}
+	}
+
+	eventlog, err := newWinEventLog(map[string]interface{}{"name": providerName, "batch_read_size": 1024})
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = eventlog.Open(0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		err := eventlog.Close()
+		if err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	var eventCount int
+	for eventCount < totalEvents {
+		records, err := eventlog.Read()
+		if err != nil {
+			t.Fatal("read error", err)
+		}
+		if len(records) == 0 {
+			t.Fatal("read returned 0 records")
+		}
+		eventCount += len(records)
+	}
+
+	t.Logf("number of records returned: %v", eventCount)
+
+	wineventlog := eventlog.(*winEventLog)
+	assert.Equal(t, 1024, wineventlog.maxRead)
+
+	expvar.Do(func(kv expvar.KeyValue) {
+		if kv.Key == "read_errors" {
+			t.Log(kv)
+		}
+	})
+}
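Editor's note: the expvar.Do call at the end of the test walks every published variable to log the error counters accumulated during the read loop. A minimal standalone sketch of that pattern (the map name is invented for the demo):

package main

import (
	"expvar"
	"fmt"
)

func main() {
	readErrors := expvar.NewMap("read_errors_demo")
	readErrors.Add("1734", 2) // e.g. two RPC_S_INVALID_BOUND recoveries

	// expvar.Do visits every published variable (including the built-in
	// "cmdline" and "memstats"), so filter by key as the test does.
	expvar.Do(func(kv expvar.KeyValue) {
		if kv.Key == "read_errors_demo" {
			fmt.Println(kv.Key, kv.Value.String())
		}
	})
}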
1 vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/syscall_windows.go generated vendored
@@ -13,6 +13,7 @@ const (
 	ERROR_INSUFFICIENT_BUFFER      syscall.Errno = 122
 	ERROR_NO_MORE_ITEMS            syscall.Errno = 259
 	ERROR_NONE_MAPPED              syscall.Errno = 1332
+	RPC_S_INVALID_BOUND            syscall.Errno = 1734
 	ERROR_INVALID_OPERATION        syscall.Errno = 4317
 	ERROR_EVT_MESSAGE_NOT_FOUND    syscall.Errno = 15027
 	ERROR_EVT_MESSAGE_ID_NOT_FOUND syscall.Errno = 15028
4 vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/wineventlog_windows.go generated vendored
@@ -125,6 +125,10 @@ func Subscribe(
 // handles available to return. Close must be called on each returned EvtHandle
 // when finished with the handle.
 func EventHandles(subscription EvtHandle, maxHandles int) ([]EvtHandle, error) {
+	if maxHandles < 1 {
+		return nil, fmt.Errorf("maxHandles must be greater than 0")
+	}
+
 	eventHandles := make([]EvtHandle, maxHandles)
 	var numRead uint32
 
2 vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template-es2x.json generated vendored
@@ -7,7 +7,7 @@
         }
       },
       "_meta": {
-        "version": "5.1.1"
+        "version": "5.1.2"
       },
       "dynamic_templates": [
         {
2 vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template.json generated vendored
@@ -5,7 +5,7 @@
         "norms": false
       },
       "_meta": {
-        "version": "5.1.1"
+        "version": "5.1.2"
      },
       "dynamic_templates": [
         {