Mirror of https://github.com/Icinga/icingabeat.git (synced 2025-07-28 16:24:03 +02:00)

Update to libbeat 6.3

This commit is contained in:
commit 1269707447 (parent 250e248f98)
vendor/github.com/elastic/beats/.appveyor.yml (generated, vendored) | 111 changes — file deleted
@@ -1,111 +0,0 @@
-# Version format
-version: "{build}"
-
-# Operating system (build VM template)
-os: Windows Server 2012 R2
-
-# Environment variables
-environment:
-  GOPATH: c:\gopath
-  GVM_DL: https://github.com/andrewkroh/gvm/releases/download/v0.0.1/gvm-windows-amd64.exe
-  PYWIN_DL: https://beats-files.s3.amazonaws.com/deps/pywin32-220.win32-py2.7.exe
-  matrix:
-  - PROJ: github.com\elastic\beats\metricbeat
-    BEAT: metricbeat
-  - PROJ: github.com\elastic\beats\filebeat
-    BEAT: filebeat
-  - PROJ: github.com\elastic\beats\winlogbeat
-    BEAT: winlogbeat
-
-# Custom clone folder (variables are not expanded here).
-clone_folder: c:\gopath\src\github.com\elastic\beats
-
-# Cache files until appveyor.yml is modified.
-cache:
-- C:\ProgramData\chocolatey\bin -> .appveyor.yml
-- C:\ProgramData\chocolatey\lib -> .appveyor.yml
-- C:\Users\appveyor\.gvm -> .go-version
-- C:\Windows\System32\gvm.exe -> .appveyor.yml
-- C:\tools\mingw64 -> .appveyor.yml
-- C:\pywin_inst.exe -> .appveyor.yml
-
-# Scripts that run after cloning repository
-install:
-  - ps: >-
      if(!(Test-Path "C:\Windows\System32\gvm.exe")) {
        wget "$env:GVM_DL" -Outfile C:\Windows\System32\gvm.exe
      }
-  - ps: gvm --format=powershell $(Get-Content .go-version) | Invoke-Expression
-  # AppVeyor installed mingw is 32-bit only so install 64-bit version.
-  - ps: >-
      if(!(Test-Path "C:\tools\mingw64\bin\gcc.exe")) {
        cinst mingw > mingw-install.txt
        Push-AppveyorArtifact mingw-install.txt
      }
-  - set PATH=C:\tools\mingw64\bin;%PATH%
-  - set PATH=%GOPATH%\bin;%PATH%
-  - go install github.com/elastic/beats/vendor/github.com/pierrre/gotestcover
-  - go version
-  - go env
-  # Download the PyWin32 installer if it is not cached.
-  - ps: >-
      if(!(Test-Path "C:\pywin_inst.exe")) {
        (new-object net.webclient).DownloadFile("$env:PYWIN_DL", 'C:/pywin_inst.exe')
      }
-  - set PYTHONPATH=C:\Python27
-  - set PATH=%PYTHONPATH%;%PYTHONPATH%\Scripts;%PATH%
-  - python --version
-  - pip install six jinja2 nose nose-timer PyYAML redis elasticsearch
-  - easy_install C:/pywin_inst.exe
-
-# To run your custom scripts instead of automatic MSBuild
-build_script:
-  # Compile
-  - appveyor AddCompilationMessage "Starting Compile"
-  - ps: cd $env:BEAT
-  - go build
-  - appveyor AddCompilationMessage "Compile Success" -FileName "%BEAT%.exe"
-
-# To run your custom scripts instead of automatic tests
-test_script:
-  # Unit tests
-  - ps: Add-AppveyorTest "Unit Tests" -Outcome Running
-  - mkdir build\coverage
-  - gotestcover -race -coverprofile=build/coverage/integration.cov github.com/elastic/beats/%BEAT%/...
-  - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed
-  # System tests
-  - ps: Add-AppveyorTest "System tests" -Outcome Running
-  - go test -race -c -cover -covermode=atomic -coverpkg ./...
-  - ps: |
      if ($env:BEAT -eq "metricbeat") {
        cp .\_meta\fields.common.yml .\_meta\fields.generated.yml
        python .\scripts\fields_collector.py | out-file -append -encoding UTF8 -filepath .\_meta\fields.generated.yml
      }
-  - ps: cd tests/system
-  - nosetests --with-timer
-  - ps: Update-AppveyorTest "System tests" -Outcome Passed
-
-after_test:
-  - ps: cd $env:GOPATH\src\$env:PROJ
-  - python ..\dev-tools\aggregate_coverage.py -o build\coverage\system.cov .\build\system-tests\run
-  - python ..\dev-tools\aggregate_coverage.py -o build\coverage\full.cov .\build\coverage
-  - go tool cover -html=build\coverage\full.cov -o build\coverage\full.html
-  - ps: Push-AppveyorArtifact build\coverage\full.cov
-  - ps: Push-AppveyorArtifact build\coverage\full.html
-  # Upload coverage report.
-  - "SET PATH=C:\\Python34;C:\\Python34\\Scripts;%PATH%"
-  - pip install codecov
-  - ps: cd $env:GOPATH\src\github.com\elastic\beats
-  - codecov -X gcov -f "%BEAT%\build\coverage\full.cov"
-
-# Executes for both successful and failed builds
-on_finish:
-  - ps: cd $env:GOPATH\src\$env:PROJ
-  - 7z a -r system-tests-output.zip build\system-tests\run
-  - ps: Push-AppveyorArtifact system-tests-output.zip
-
-# To disable deployment
-deploy: off
-
-# Notifications should only be setup using the AppVeyor UI so that
-# forks can be created without inheriting the settings.
vendor/github.com/elastic/beats/.gitignore (generated, vendored) | 1 change
@@ -7,6 +7,7 @@
 /*/logs
 /*/fields.yml
 /*/*.template*.json
+**/html_docs
 
 # Files
 .DS_Store
vendor/github.com/elastic/beats/.go-version (generated, vendored) | 2 changes
@@ -1 +1 @@
-1.9.2
+1.9.4
vendor/github.com/elastic/beats/.travis.yml (generated, vendored) | 24 changes
@@ -14,7 +14,7 @@ env:
   - GOX_FLAGS="-arch amd64"
   - DOCKER_COMPOSE_VERSION=1.11.1
   - GO_VERSION="$(cat .go-version)"
-  - TRAVIS_ETCD_VERSION=v3.2.8
+  - TRAVIS_MINIKUBE_VERSION=v0.25.2
 
 jobs:
   include:
@@ -50,7 +50,7 @@ jobs:
       go: $GO_VERSION
       stage: test
     - os: osx
-      env: TARGETS="-C auditbeat testsuite"
+      env: TARGETS="TEST_ENVIRONMENT=0 -C auditbeat testsuite"
       go: $GO_VERSION
       stage: test
     - os: linux
@@ -68,7 +68,7 @@ jobs:
       go: $GO_VERSION
       stage: test
    - os: linux
-      env: TARGETS="-C libbeat stress-tests"
+      env: STRESS_TEST_OPTIONS="-timeout=20m -race -v -parallel 1" TARGETS="-C libbeat stress-tests"
       go: $GO_VERSION
       stage: test
 
@@ -113,19 +113,31 @@ jobs:
       install: deploy/kubernetes/.travis/setup.sh
       env:
        - TARGETS="-C deploy/kubernetes test"
-       - TRAVIS_KUBE_VERSION=v1.6.11
+       - TRAVIS_K8S_VERSION=v1.6.4
       stage: test
     - os: linux
       install: deploy/kubernetes/.travis/setup.sh
       env:
        - TARGETS="-C deploy/kubernetes test"
-       - TRAVIS_KUBE_VERSION=v1.7.7
+       - TRAVIS_K8S_VERSION=v1.7.5
       stage: test
     - os: linux
       install: deploy/kubernetes/.travis/setup.sh
       env:
        - TARGETS="-C deploy/kubernetes test"
-       - TRAVIS_KUBE_VERSION=v1.8.0
+       - TRAVIS_K8S_VERSION=v1.8.0
+      stage: test
+    - os: linux
+      install: deploy/kubernetes/.travis/setup.sh
+      env:
+       - TARGETS="-C deploy/kubernetes test"
+       - TRAVIS_K8S_VERSION=v1.9.4
+      stage: test
+    - os: linux
+      install: deploy/kubernetes/.travis/setup.sh
+      env:
+       - TARGETS="-C deploy/kubernetes test"
+       - TRAVIS_K8S_VERSION=v1.10.0
       stage: test
 
 addons:
vendor/github.com/elastic/beats/CHANGELOG-developer.asciidoc (generated, vendored) | 29 lines — new file
@@ -0,0 +1,29 @@
+// Use these for links to issue and pulls. Note issues and pulls redirect one to
+// each other on Github, so don't worry too much on using the right prefix.
+:issue: https://github.com/elastic/beats/issues/
+:pull: https://github.com/elastic/beats/pull/
+
+This changelog is intended for community Beat developers. It covers the major
+breaking changes to the internal APIs in the official Beats and changes related
+to developing a Beat like code generators or `fields.yml`. Only the major
+changes will be covered in this changelog that are expected to affect community
+developers. Each breaking change added here should have an explanation on how
+other Beats should be migrated.
+
+Note: This changelog was only started after the 6.3 release.
+
+=== Beats version HEAD
+https://github.com/elastic/beats/compare/v6.3.0..master[Check the HEAD diff]
+
+The list below covers the major changes between 6.3.0 and master only.
+
+==== Breaking changes
+
+- The beat.Pipeline is now passed to cfgfile.RunnerFactory. Beats using libbeat for module reloading or autodiscovery need to be adapted. {pull}7018[7017]
+- Moving of TLS helper functions and structs from `output/tls` to `tlscommon`. {pull}7054[7054]
+
+==== Bugfixes
+
+- Fix permissions of generated Filebeat filesets. {pull}7140[7140]
+
+==== Added
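The first breaking change above is the one most relevant to this commit, since icingabeat builds on libbeat. The sketch below shows what an adapted runner factory might look like, assuming the 6.3 Create(beat.Pipeline, *common.Config, *common.MapStrPointer) signature implied by the changelog entry; icingaFactory and moduleRunner are hypothetical names, so the exact interface should be verified against the vendored 6.3 sources. For the second change, imports of the moved TLS helpers typically just switch from the old output/tls location to the tlscommon package.

// Minimal sketch (assumed 6.3 RunnerFactory shape; names are hypothetical).
package beater

import (
	"github.com/elastic/beats/libbeat/beat"
	"github.com/elastic/beats/libbeat/cfgfile"
	"github.com/elastic/beats/libbeat/common"
)

// moduleRunner handles one reloadable config section and owns its own
// pipeline client instead of a publisher captured at construction time.
type moduleRunner struct {
	client beat.Client
}

func (r *moduleRunner) Start() {}
func (r *moduleRunner) Stop()  { r.client.Close() }

type icingaFactory struct{}

// Create now receives the live beat.Pipeline (libbeat 6.3, {pull}7018),
// so the runner connects its own client to the publishing pipeline.
func (f icingaFactory) Create(pipeline beat.Pipeline, config *common.Config, meta *common.MapStrPointer) (cfgfile.Runner, error) {
	client, err := pipeline.Connect()
	if err != nil {
		return nil, err
	}
	return &moduleRunner{client: client}, nil
}

With this shape, module reloading and autodiscovery hand the factory the active pipeline, and each runner manages its own client lifecycle.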
vendor/github.com/elastic/beats/CHANGELOG.asciidoc (generated, vendored) | 355 changes
@@ -1,4 +1,3 @@
-
 // Use these for links to issue and pulls. Note issues and pulls redirect one to
 // each other on Github, so don't worry too much on using the right prefix.
 :issue: https://github.com/elastic/beats/issues/
@@ -8,7 +7,7 @@
 // Template, add newest changes here
 
 === Beats version HEAD
-https://github.com/elastic/beats/compare/v6.2.2...6.2[Check the HEAD diff]
+https://github.com/elastic/beats/compare/v6.3.2...6.3[Check the HEAD diff]
 
 ==== Breaking changes
 
@@ -32,8 +31,6 @@ https://github.com/elastic/beats/compare/v6.2.2...6.2[Check the HEAD diff]
 
 *Auditbeat*
 
-- Add hex decoding for the name field in audit path records. {pull}6687[6687]
-
 *Filebeat*
 
 *Heartbeat*
@@ -42,8 +39,6 @@ https://github.com/elastic/beats/compare/v6.2.2...6.2[Check the HEAD diff]
 
 *Packetbeat*
 
-- HTTP parses successfully on empty status phrase. {issue}6176[6176]
-
 *Winlogbeat*
 
 ==== Added
@@ -81,6 +76,292 @@ https://github.com/elastic/beats/compare/v6.2.2...6.2[Check the HEAD diff]
 
 ////////////////////////////////////////////////////////////
 
+[[release-notes-6.3.2]]
+=== Beats version 6.3.2
+https://github.com/elastic/beats/compare/v6.3.1...v6.3.2[View commits]
+
+==== Bugfixes
+
+*Affecting all Beats*
+
+- Fix registry duplicates and log resending on upgrade. {issue}7634[7634]
+- Fix default value for logging.files.keepfiles. It was being set to 0 and now
+  it's set to the documented value of 7. {issue}7494[7494]
+- Retain compatibility with older Docker server versions. {issue}7542[7542]
+
+*Metricbeat*
+
+- Fix missing hosts config option in Ceph module. {pull}7596[7596]
+- Ensure metadata updates don't replace existing pod metrics. {pull}7573[7573]
+
+==== Added
+
+*Metricbeat*
+
+- Add support for bearer token files to HTTP helper. {pull}7527[7527]
+
+*Packetbeat*
+
+- Updated the TLS protocol parser with new cipher suites added to TLS 1.3. {issue}7455[7455]
+
+
+[[release-notes-6.3.1]]
+=== Beats version 6.3.1
+https://github.com/elastic/beats/compare/v6.3.0...v6.3.1[View commits]
+
+==== Bugfixes
+
+*Affecting all Beats*
+
+- Allow index-pattern only setup when setup.dashboards.only_index=true. {pull}7285[7285]
+- Preserve the event when source matching fails in `add_docker_metadata`. {pull}7133[7133]
+- Negotiate Docker API version from our client instead of using a hardcoded one. {pull}7165[7165]
+- Fix duplicating dynamic_fields in template when overwriting the template. {pull}7352[7352]
+
+*Auditbeat*
+
+- Fixed parsing of AppArmor audit messages. {pull}6978[6978]
+
+*Filebeat*
+
+- Comply with PostgreSQL database name format {pull}7198[7198]
+- Optimize PostgreSQL ingest pipeline to use anchored regexp and merge multiple regexp into a single expression. {pull}7269[7269]
+- Keep different registry entry per container stream to avoid wrong offsets. {issue}7281[7281]
+- Fix offset field pointing at end of a line. {issue}6514[6514]
+- Commit registry writes to stable storage to avoid corrupt registry files. {issue}6792[6792]
+
+*Metricbeat*
+
+- Fix field mapping for the system process CPU ticks fields. {pull}7230[7230]
+- Ensure canonical naming for JMX beans is disabled in Jolokia module. {pull}7047[7047]
+- Fix Jolokia attribute mapping when using wildcards and MBean names with multiple properties. {pull}7321[7321]
+
+*Packetbeat*
+
+- Fix an out of bounds access in HTTP parser caused by malformed request. {pull}6997[6997]
+- Fix missing type for `http.response.body` field. {pull}7169[7169]
+
+==== Added
+
+*Auditbeat*
+
+- Added caching of UID and GID values to auditd module. {pull}6978[6978]
+- Updated syscall tables for Linux 4.16. {pull}6978[6978]
+- Added better error messages for when the auditd module fails due to the
+  Linux kernel not supporting auditing (CONFIG_AUDIT=n). {pull}7012[7012]
+
+*Metricbeat*
+
+- Collect accumulated docker network metrics and mark old ones as deprecated. {pull}7253[7253]
+
+
+[[release-notes-6.3.0]]
+=== Beats version 6.3.0
+https://github.com/elastic/beats/compare/v6.2.3...v6.3.0[View commits]
+
+==== Breaking changes
+
+*Affecting all Beats*
+
+- De dot keys of labels and annotations in kubernetes meta processors to prevent collisions. {pull}6203[6203]
+- Rename `beat.cpu.*.time metrics` to `beat.cpu.*.time.ms`. {pull}6449[6449]
+- Add `host.name` field to all events, to avoid mapping conflicts. This could be breaking Logstash configs if you rely on the `host` field being a string. {pull}7051[7051]
+
+*Filebeat*
+
+- Add validation for Stdin, when Filebeat is configured with Stdin and any other inputs, Filebeat
+  will now refuse to start. {pull}6463[6463]
+- Mark `system.syslog.message` and `system.auth.message` as `text` instead of `keyword`. {pull}6589[6589]
+
+*Metricbeat*
+
+- De dot keys in kubernetes/event metricset to prevent collisions. {pull}6203[6203]
+- Add config option for windows/perfmon metricset to ignore non existent counters. {pull}6432[6432]
+- Refactor docker CPU calculations to be more consistent with `docker stats`. {pull}6608[6608]
+- Update logstash.node_stats metricset to write data under `logstash.node.stats.*`. {pull}6714[6714]
+
+==== Bugfixes
+
+*Affecting all Beats*
+
+- Fix panic when Events containing a float32 value are normalized. {pull}6129[6129]
+- Fix `setup.dashboards.always_kibana` when using Kibana 5.6. {issue}6090[6090]
+- Fix for Kafka logger. {pull}6430[6430]
+- Remove double slashes in Windows service script. {pull}6491[6491]
+- Ensure Kubernetes labels/annotations don't break mapping {pull}6490[6490]
+- Ensure that the dashboard zip files can't contain files outside of the kibana directory. {pull}6921[6921]
+- Fix map overwrite panics by cloning shared structs before doing the update. {pull}6947[6947]
+- Fix delays on autodiscovery events handling caused by blocking runner stops. {pull}7170[7170]
+- Do not emit Kubernetes autodiscover events for Pods without IP address. {pull}7235[7235]
+
+*Auditbeat*
+
+- Add hex decoding for the name field in audit path records. {pull}6687[6687]
+- Fixed a deadlock in the file_integrity module under Windows. {issue}6864[6864]
+
+*Filebeat*
+
+- Fix panic when log prospector configuration fails to load. {issue}6800[6800]
+- Fix memory leak in log prospector when files cannot be read. {issue}6797[6797]
+- Add raw JSON to message field when JSON parsing fails. {issue}6516[6516]
+- Commit registry writes to stable storage to avoid corrupt registry files. {pull}6877[6877]
+- Fix a parsing issue in the syslog input for RFC3339 timestamp and time with nanoseconds. {pull}7046[7046]
+- Fix an issue with an overflowing wait group when using the TCP input. {issue}7202[7202]
+- Fix an issue when parsing ISO8601 dates with timezone definition {issue}7367[7367]
+
+*Heartbeat*
+
+- Fix race due to updates of shared a map, that was not supposed to be shared between multiple go-routines. {issue}6616[6616]
+
+*Metricbeat*
+
+- Fix the default configuration for Logstash to include the default port. {pull}6279[6279]
+- Fix dealing with new process status codes in Linux kernel 4.14+. {pull}6306[6306]
+- Add filtering option by exact device names in system.diskio. `diskio.include_devices`. {pull}6085[6085]
+- Add connections metricset to RabbitMQ module {pull}6548[6548]
+- Fix panic in http dependent modules when invalid config was used. {pull}6205[6205]
+- Fix system.filesystem.used.pct value to match what df reports. {issue}5494[5494]
+- Fix namespace disambiguation in Kubernetes state_* metricsets. {issue}6281[6281]
+- Fix Windows perfmon metricset so that it sends metrics when an error occurs. {pull}6542[6542]
+- Fix Kubernetes calculated fields store. {pull}6564{6564}
+- Exclude bind mounts in fsstat and filesystem metricsets. {pull}6819[6819]
+- Don't stop Metricbeat if aerospike server is down. {pull}6874[6874]
+- disk reads and write count metrics in RabbitMQ queue metricset made optional. {issue}6876[6876]
+- Add mapping for docker metrics per cpu. {pull}6843[6843]
+
+*Winlogbeat*
+
+- Fixed a crash under Windows 2003 and XP when an event had less insert strings than required by its format string. {pull}6247[6247]
+
+==== Added
+
+*Affecting all Beats*
+
+- Update Golang 1.9.4 {pull}6326[6326]
+- Add the ability to log to the Windows Event Log. {pull}5913[5913]
+- The node name can be discovered automatically by machine-id matching when beat deployed outside Kubernetes cluster. {pull}6146[6146]
+- Panics will be written to the logger before exiting. {pull}6199[6199]
+- Add builder support for autodiscover and annotations builder {pull}6408[6408]
+- Add plugin support for autodiscover builders, providers {pull}6457[6457]
+- Preserve runtime from container statuses in Kubernetes autodiscover {pull}6456[6456]
+- Experimental feature setup.template.append_fields added. {pull}6024[6024]
+- Add appender support to autodiscover {pull}6469[6469]
+- Add add_host_metadata processor {pull}5968[5968]
+- Retry configuration to load dashboards if Kibana is not reachable when the beat starts. {pull}6560[6560]
+- Add `has_fields` conditional to filter events based on the existence of all the given fields. {issue}6285[6285] {pull}6653[6653]
+- Add support for spooling to disk to the beats event publishing pipeline. {pull}6581[6581]
+- Added logging of system info at Beat startup. {issue}5946[5946]
+- Do not log errors if X-Pack Monitoring is enabled but Elastisearch X-Pack is not. {pull}6627[6627]
+- Add rename processor. {pull}6292[6292]
+- Allow override of dynamic template `match_mapping_type` for fields with object_type. {pull}6691[6691]
+
+*Filebeat*
+
+- Add IIS module to parse access log and error log. {pull}6127[6127]
+- Renaming of the prospector type to the input type and all prospectors are now moved to the input
+  folder, to maintain backward compatibility type aliasing was used to map the old type to the new
+  one. This change also affect YAML configuration. {pull}6078[6078]
+- Addition of the TCP input {pull}6700[6700]
+- Add option to convert the timestamps to UTC in the system module. {pull}5647[5647]
+- Add Logstash module support for main log and the slow log, support the plain text or structured JSON format {pull}5481[5481]
+- Add stream filtering when using `docker` prospector. {pull}6057[6057]
+- Add support for CRI logs format. {issue}5630[5630]
+- Add json.ignore_decoding_error config to not log json decoding erors. {issue}6547[6547]
+- Make registry file permission configurable. {pull}6455[6455]
+- Add MongoDB module. {pull}6283[6238]
+- Add Ingest pipeline loading to setup. {pull}6814[6814]
+- Add support of log_format combined to NGINX access logs. {pull}6858[6858]
+- Release config reloading feature as GA.
+- Add support human friendly size for the UDP input. {pull}6886[6886]
+- Add Syslog input to ingest RFC3164 Events via TCP and UDP {pull}6842[6842]
+- Remove the undefined `username` option from the Redis input and clarify the documentation. {pull}6662[6662]
+
+*Heartbeat*
+
+- Made the URL field of Heartbeat aggregateable. {pull}6263[6263]
+- Use `match.Matcher` for checking Heartbeat response bodies with regular expressions. {pull}6539[6539]
+
+*Metricbeat*
+
+- Support apache status pages for versions older than 2.4.16. {pull}6450[6450]
+- Add support for huge pages on Linux. {pull}6436[6436]
+- Support to optionally 'de dot' keys in http/json metricset to prevent collisions. {pull}5970[5970]
+- Add graphite protocol metricbeat module. {pull}4734[4734]
+- Add http server metricset to support push metrics via http. {pull}4770[4770]
+- Make config object public for graphite and http server {pull}4820[4820]
+- Add system uptime metricset. {issue}4848[4848]
+- Add experimental `queue` metricset to RabbitMQ module. {pull}4788[4788]
+- Add additional php-fpm pool status kpis for Metricbeat module {pull}5287[5287]
+- Add etcd module. {issue}4970[4970]
+- Add ip address of docker containers to event. {pull}5379[5379]
+- Add ceph osd tree information to metricbeat {pull}5498[5498]
+- Add ceph osd_df to metricbeat {pull}5606[5606]
+- Add basic Logstash module. {pull}5540[5540]
+- Add dashboard for Windows service metricset. {pull}5603[5603]
+- Add pct calculated fields for Pod and container CPU and memory usages. {pull}6158[6158]
+- Add statefulset support to Kubernetes module. {pull}6236[6236]
+- Refactor prometheus endpoint parsing to look similar to upstream prometheus {pull}6332[6332]
+- Making the http/json metricset GA. {pull}6471[6471]
+- Add support for array in http/json metricset. {pull}6480[6480]
+- Making the jolokia/jmx module GA. {pull}6143[6143]
+- Making the MongoDB module GA. {pull}6554[6554]
+- Allow to disable labels `dedot` in Docker module, in favor of a safe way to keep dots. {pull}6490[6490]
+- Add experimental module to collect metrics from munin nodes. {pull}6517[6517]
+- Add support for wildcards and explicit metrics grouping in jolokia/jmx. {pull}6462[6462]
+- Set `collector` as default metricset in Prometheus module. {pull}6636[6636] {pull}6747[6747]
+- Set `mntr` as default metricset in Zookeeper module. {pull}6674[6674]
+- Set default metricsets in vSphere module. {pull}6676[6676]
+- Set `status` as default metricset in Apache module. {pull}6673[6673]
+- Set `namespace` as default metricset in Aerospike module. {pull}6669[6669]
+- Set `service` as default metricset in Windows module. {pull}6675[6675]
+- Set all metricsets as default metricsets in uwsgi module. {pull}6688[6688]
+- Allow autodiscover to monitor unexposed ports {pull}6727[6727]
+- Mark kubernetes.event metricset as beta. {pull}6715[6715]
+- Set all metricsets as default metricsets in couchbase module. {pull}6683[6683]
+- Mark uwsgi module and metricset as beta. {pull}6717[6717]
+- Mark Golang module and metricsets as beta. {pull}6711[6711]
+- Mark system.raid metricset as beta. {pull}6710[6710]
+- Mark http.server metricset as beta. {pull}6712[6712]
+- Mark metricbeat logstash module and metricsets as beta. {pull}6713[6713]
+- Set all metricsets as default metricsets in Ceph module. {pull}6676[6676]
+- Set `container`, `cpu`, `diskio`, `healthcheck`, `info`, `memory` and `network` in docker module as default. {pull}6718[6718]
+- Set `cpu`, `load`, `memory`, `network`, `process` and `process_summary` as default metricsets in system module. {pull}6689[6689]
+- Set `collector` as default metricset in Dropwizard module. {pull}6669[6669]
+- Set `info` and `keyspace` as default metricsets in redis module. {pull}6742[6742]
+- Set `connection` as default metricset in rabbitmq module. {pull}6743[6743]
+- Set all metricsets as default metricsets in Elasticsearch module. {pull}6755[6755]
+- Set all metricsets as default metricsets in Etcd module. {pull}6756[6756]
+- Set server metricsets as default in Graphite module. {pull}6757[6757]
+- Set all metricsets as default metricsets in HAProxy module. {pull}6758[6758]
+- Set all metricsets as default metricsets in Kafka module. {pull}6759[6759]
+- Set all metricsets as default metricsets in postgresql module. {pull}6761[6761]
+- Set status metricsets as default in Kibana module. {pull}6762[6762]
+- Set all metricsets as default metricsets in Logstash module. {pull}6763[6763]
+- Set `container`, `node`, `pod`, `system`, `volume` as default in Kubernetes module. {pull} 6764[6764]
+- Set `stats` as default in memcached module. {pull}6765[6765]
+- Set all metricsets as default metricsets in Mongodb module. {pull}6766[6766]
+- Set `pool` as default metricset for php_fpm module. {pull}6768[6768]
+- Set `status` as default metricset for mysql module. {pull} 6769[6769]
+- Set `stubstatus` as default metricset for nginx module. {pull}6770[6770]
+- Added support for haproxy 1.7 and 1.8. {pull}6793[6793]
+- Add accumulated I/O stats to diskio in the line of `docker stats`. {pull}6701[6701]
+- Ignore virtual filesystem types by default in system module. {pull}6819[6819]
+- Release config reloading feature as GA. {pull}6891[6891]
+- Kubernetes deployment: Add ServiceAccount config to system metricbeat. {pull}6824[6824]
+- Kubernetes deployment: Add DNS Policy to system metricbeat. {pull}6656[6656]
+
+*Packetbeat*
+
+- Add support for condition on bool type {issue}5659[5659] {pull}5954[5954]
+- Fix high memory usage on HTTP body if body is not published. {pull}6680[6680]
+- Allow to capture the HTTP request or response bodies independently. {pull}6784[6784]
+- HTTP publishes an Error event for unmatched requests or responses. {pull}6794[6794]
+
+*Winlogbeat*
+
+- Use bookmarks to persist the last published event. {pull}6150[6150]
+
+
 [[release-notes-6.2.3]]
 === Beats version 6.2.3
@@ -308,6 +589,10 @@ https://github.com/elastic/beats/compare/v6.0.1...v6.1.0[View commits]
 - Fix http parse to allow to parse get request with space in the URI. {pull}5495[5495]
 - Fix mysql SQL parser to trim `\r` from Windows Server `SELECT\r\n\t1`. {pull}5572[5572]
 - Fix corruption when parsing repeated headers in an HTTP request or response. {pull}6325[6325]
+- Fix panic when parsing partial AMQP messages. {pull}6384[6384]
+- Fix out of bounds access to slice in MongoDB parser. {pull}6256[6256]
+- Fix sniffer hanging on exit under Linux. {pull}6535[6535]
+- Fix bounds check error in http parser causing a panic. {pull}6750[6750]
 
 *Winlogbeat*
 
@@ -419,6 +704,7 @@ The list below covers the changes between 6.0.0-rc2 and 6.0.0 GA only.
 *Filebeat*
 
 - Add Kubernetes manifests to deploy Filebeat. {pull}5349[5349]
+- Add container short ID matching to add_docker_metadata. {pull}6172[6172]
 
 *Metricbeat*
 
@@ -923,63 +1209,6 @@ https://github.com/elastic/beats/compare/v5.4.0...v6.0.0-alpha1[View commits]
 
 - Prospector reloading only works properly with new files. {pull}3546[3546]
 
-[[release-notes-5.6.7]]
-=== Beats version 5.6.7
-https://github.com/elastic/beats/compare/v5.6.6...v5.6.7[View commits]
-
-No changes in this release.
-
-
-[[release-notes-5.6.6]]
-=== Beats version 5.6.6
-https://github.com/elastic/beats/compare/v5.6.5...v5.6.6[View commits]
-
-No changes in this release.
-
-
-[[release-notes-5.6.5]]
-=== Beats version 5.6.5
-https://github.com/elastic/beats/compare/v5.6.4...v5.6.5[View commits]
-
-==== Bugfixes
-
-*Affecting all Beats*
-
-- Fix duplicate batches of events in retry queue. {pull}5520[5520]
-
-*Metricbeat*
-
-- Clarify meaning of percentages reported by system core metricset. {pull}5565[5565]
-- Fix map overwrite in docker diskio module. {issue}5582[5582]
-
-[[release-notes-5.6.4]]
-=== Beats version 5.6.4
-https://github.com/elastic/beats/compare/v5.6.3...v5.6.4[View commits]
-
-==== Bugfixes
-
-*Affecting all Beats*
-
-- Fix race condition in internal logging rotator. {pull}4519[4519]
-
-*Packetbeat*
-
-- Fix missing length check in the PostgreSQL module. {pull}5457[5457]
-
-==== Added
-
-*Affecting all Beats*
-
-- Add support for enabling TLS renegotiation. {issue}4386[4386]
-- Add setting to enable/disable the slow start in logstash output. {pull}5400[5400]
-
-[[release-notes-5.6.3]]
-=== Beats version 5.6.3
-https://github.com/elastic/beats/compare/v5.6.2...v5.6.3[View commits]
-
-No changes in this release.
-
-
 [[release-notes-5.6.2]]
 === Beats version 5.6.2
 https://github.com/elastic/beats/compare/v5.6.1...v5.6.2[View commits]
vendor/github.com/elastic/beats/LICENSE.txt (generated, vendored) | 24 changes
@@ -1,13 +1,13 @@
-Copyright (c) 2012–2017 Elastic <http://www.elastic.co>
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
+Source code in this repository is variously licensed under the Apache License
+Version 2.0, an Apache compatible license, or the Elastic License. Outside of
+the "x-pack" folder, source code in a given file is licensed under the Apache
+License Version 2.0, unless otherwise noted at the beginning of the file or a
+LICENSE file present in the directory subtree declares a separate license.
+Within the "x-pack" folder, source code in a given file is licensed under the
+Elastic License, unless otherwise noted at the beginning of the file or a
+LICENSE file present in the directory subtree declares a separate license.
+
+The build produces two sets of binaries - one set that falls under the Elastic
+License and another set that falls under Apache License Version 2.0. The
+binaries that contain `-oss` in the artifact name are licensed under the Apache
+License Version 2.0.
vendor/github.com/elastic/beats/Makefile (generated, vendored) | 10 changes
@@ -110,13 +110,13 @@ docs:
 	@$(foreach var,$(PROJECTS),BUILD_DIR=${BUILD_DIR} $(MAKE) -C $(var) docs || exit 1;)
 	sh ./script/build_docs.sh dev-guide github.com/elastic/beats/docs/devguide ${BUILD_DIR}
 
-.PHONY: package
-package: update beats-dashboards
-	@$(foreach var,$(BEATS),SNAPSHOT=$(SNAPSHOT) $(MAKE) -C $(var) package || exit 1;)
+.PHONY: package-all
+package-all: update beats-dashboards
+	@$(foreach var,$(BEATS),SNAPSHOT=$(SNAPSHOT) $(MAKE) -C $(var) package-all || exit 1;)
 
 	@echo "Start building the dashboards package"
 	@mkdir -p build/upload/
-	@BUILD_DIR=${BUILD_DIR} SNAPSHOT=$(SNAPSHOT) $(MAKE) -C dev-tools/packer package-dashboards ${BUILD_DIR}/upload/build_id.txt
+	@BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${BUILD_DIR}/upload SNAPSHOT=$(SNAPSHOT) $(MAKE) -C dev-tools/packer package-dashboards ${BUILD_DIR}/upload/build_id.txt
 	@mv build/upload build/dashboards-upload
 
 	@# Copy build files over to top build directory
@@ -153,6 +153,8 @@ notice: python-env
 python-env:
 	@test -d $(PYTHON_ENV) || virtualenv $(VIRTUALENV_PARAMS) $(PYTHON_ENV)
 	@$(PYTHON_ENV)/bin/pip install -q --upgrade pip autopep8 six
+	@# Work around pip bug. See: https://github.com/pypa/pip/issues/4464
+	@find $(PYTHON_ENV) -type d -name dist-packages -exec sh -c "echo dist-packages > {}.pth" ';'
 
 # Tests if apm works with the current code
 .PHONY: test-apm
vendor/github.com/elastic/beats/NOTICE.txt (generated, vendored) | 1563 changes
File diff suppressed because it is too large.
vendor/github.com/elastic/beats/README.md (generated, vendored) | 17 changes
@@ -1,5 +1,4 @@
 [](https://travis-ci.org/elastic/beats)
-[](https://ci.appveyor.com/project/elastic-beats/beats/branch/master)
 [](http://goreportcard.com/report/elastic/beats)
 [](https://codecov.io/github/elastic/beats?branch=master)
 
@@ -20,6 +19,7 @@ framework for creating Beats, and all the officially supported Beats:
 
 Beat | Description
 --- | ---
+[Auditbeat](https://github.com/elastic/beats/tree/master/auditbeat) | Collect your Linux audit framework data and monitor the integrity of your files.
 [Filebeat](https://github.com/elastic/beats/tree/master/filebeat) | Tails and ships log files
 [Heartbeat](https://github.com/elastic/beats/tree/master/heartbeat) | Ping remote services for availability
 [Metricbeat](https://github.com/elastic/beats/tree/master/metricbeat) | Fetches sets of metrics from the operating system and services
@@ -27,9 +27,9 @@ Beat | Description
 [Winlogbeat](https://github.com/elastic/beats/tree/master/winlogbeat) | Fetches and ships Windows Event logs
 
 In addition to the above Beats, which are officially supported by
-[Elastic](https://elastic.co), the
-community has created a set of other Beats that make use of libbeat but live
-outside of this Github repository. We maintain a list of community Beats
+[Elastic](https://elastic.co), the community has created a set of other Beats
+that make use of libbeat but live outside of this Github repository. We maintain
+a list of community Beats
 [here](https://www.elastic.co/guide/en/beats/libbeat/master/community-beats.html).
 
 ## Documentation and Getting Started
@@ -38,6 +38,7 @@ You can find the documentation and getting started guides for each of the Beats
 on the [elastic.co site](https://www.elastic.co/guide/):
 
 * [Beats platform](https://www.elastic.co/guide/en/beats/libbeat/current/index.html)
+* [Auditbeat](https://www.elastic.co/guide/en/beats/auditbeat/current/index.html)
 * [Filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/index.html)
 * [Heartbeat](https://www.elastic.co/guide/en/beats/heartbeat/current/index.html)
 * [Metricbeat](https://www.elastic.co/guide/en/beats/metricbeat/current/index.html)
@@ -65,8 +66,8 @@ create your own Beat.
 Please start by reading our [CONTRIBUTING](CONTRIBUTING.md) file.
 
 If you are creating a new Beat, you don't need to submit the code to this
-repository. You can simply start working in a new repository and make use of
-the libbeat packages, by following our [developer
+repository. You can simply start working in a new repository and make use of the
+libbeat packages, by following our [developer
 guide](https://www.elastic.co/guide/en/beats/libbeat/current/new-beat.html).
 After you have a working prototype, open a pull request to add your Beat to the
 list of [community
@@ -74,5 +75,5 @@ Beats](https://github.com/elastic/beats/blob/master/libbeat/docs/communitybeats.
 
 ## Building Beats from the Source
 
-See our [CONTRIBUTING](CONTRIBUTING.md) file for information about setting up your dev
-environment to build Beats from the source.
+See our [CONTRIBUTING](CONTRIBUTING.md) file for information about setting up
+your dev environment to build Beats from the source.
vendor/github.com/elastic/beats/Vagrantfile (generated, vendored) | 2 changes
@@ -63,7 +63,7 @@ if [ ! -e "~/bin/gvm" ]; then
   chmod +x ~/bin/gvm
   echo 'export GOPATH=$HOME/go' >> ~/.bash_profile
   echo 'export PATH=$HOME/bin:$GOPATH/bin:$PATH' >> ~/.bash_profile
-  echo 'eval "$(gvm 1.9.2)"' >> ~/.bash_profile
+  echo 'eval "$(gvm 1.9.4)"' >> ~/.bash_profile
 fi
 SCRIPT
 
vendor/github.com/elastic/beats/auditbeat/Dockerfile (generated, vendored) | 17 lines — new file
@@ -0,0 +1,17 @@
+FROM golang:1.9.2
+MAINTAINER Nicolas Ruflin <ruflin@elastic.co>
+
+RUN set -x && \
+    apt-get update && \
+    apt-get install -y --no-install-recommends \
+         netcat python-pip virtualenv && \
+    apt-get clean
+
+RUN pip install --upgrade setuptools
+
+# Setup work environment
+ENV AUDITBEAT_PATH /go/src/github.com/elastic/beats/auditbeat
+
+RUN mkdir -p $AUDITBEAT_PATH/build/coverage
+WORKDIR $AUDITBEAT_PATH
+HEALTHCHECK CMD exit 0
vendor/github.com/elastic/beats/auditbeat/Makefile (generated, vendored) | 20 changes
@@ -1,13 +1,15 @@
 BEAT_NAME=auditbeat
 BEAT_TITLE=Auditbeat
 BEAT_DESCRIPTION=Audit the activities of users and processes on your system.
-SYSTEM_TESTS=false
-TEST_ENVIRONMENT=false
+SYSTEM_TESTS=true
+TEST_ENVIRONMENT?=true
 GOX_OS?=linux windows ## @Building List of all OS to be supported by "make crosscompile".
 DEV_OS?=linux
+TESTING_ENVIRONMENT?=latest
+ES_BEATS?=..
 
 # Path to the libbeat Makefile
--include ../libbeat/scripts/Makefile
+include ${ES_BEATS}/libbeat/scripts/Makefile
 
 # This is called by the beats packer before building starts
 .PHONY: before-build
@@ -33,7 +35,7 @@ before-build:
 		${PREFIX}/${BEAT_NAME}-darwin.reference.yml
 
 	@cat ${ES_BEATS}/auditbeat/_meta/common.p1.yml \
-		<(go run scripts/generate_config.go -os linux -concat) \
+		<(go run scripts/generate_config.go -os linux -arch amd64 -concat) \
 		${ES_BEATS}/auditbeat/_meta/common.p2.yml \
 		${ES_BEATS}/libbeat/_meta/config.yml > \
 		${PREFIX}/${BEAT_NAME}-linux.yml
@@ -42,6 +44,16 @@ before-build:
 		${ES_BEATS}/libbeat/_meta/config.reference.yml > \
 		${PREFIX}/${BEAT_NAME}-linux.reference.yml
 
+	@cat ${ES_BEATS}/auditbeat/_meta/common.p1.yml \
+		<(go run scripts/generate_config.go -os linux -arch i386 -concat) \
+		${ES_BEATS}/auditbeat/_meta/common.p2.yml \
+		${ES_BEATS}/libbeat/_meta/config.yml > \
+		${PREFIX}/${BEAT_NAME}-linux-386.yml
+
+	@cat ${ES_BEATS}/auditbeat/_meta/common.reference.yml \
+		<(go run scripts/generate_config.go -os linux -concat -ref) \
+		${ES_BEATS}/libbeat/_meta/config.reference.yml > \
+		${PREFIX}/${BEAT_NAME}-linux-386.reference.yml
+
 # Collects all dependencies and then calls update
 .PHONY: collect
 collect: fields collect-docs configs kibana
114
vendor/github.com/elastic/beats/auditbeat/auditbeat.reference.yml
generated
vendored
114
vendor/github.com/elastic/beats/auditbeat/auditbeat.reference.yml
generated
vendored
@ -139,7 +139,8 @@ auditbeat.modules:
|
|||||||
|
|
||||||
# Hints the minimum number of events stored in the queue,
|
# Hints the minimum number of events stored in the queue,
|
||||||
# before providing a batch of events to the outputs.
|
# before providing a batch of events to the outputs.
|
||||||
# A value of 0 (the default) ensures events are immediately available
|
# The default value is set to 2048.
|
||||||
|
# A value of 0 ensures events are immediately available
|
||||||
# to be sent to the outputs.
|
# to be sent to the outputs.
|
||||||
#flush.min_events: 2048
|
#flush.min_events: 2048
|
||||||
|
|
||||||
@ -147,6 +148,66 @@ auditbeat.modules:
|
|||||||
# if the number of events stored in the queue is < min_flush_events.
|
# if the number of events stored in the queue is < min_flush_events.
|
||||||
#flush.timeout: 1s
|
#flush.timeout: 1s
|
||||||
|
|
||||||
|
# The spool queue will store events in a local spool file, before
|
||||||
|
# forwarding the events to the outputs.
|
||||||
|
#
|
||||||
|
# Beta: spooling to disk is currently a beta feature. Use with care.
|
||||||
|
#
|
||||||
|
# The spool file is a circular buffer, which blocks once the file/buffer is full.
|
||||||
|
# Events are put into a write buffer and flushed once the write buffer
|
||||||
|
# is full or the flush_timeout is triggered.
|
||||||
|
# Once ACKed by the output, events are removed immediately from the queue,
|
||||||
|
# making space for new events to be persisted.
|
||||||
|
#spool:
|
||||||
|
# The file namespace configures the file path and the file creation settings.
|
||||||
|
# Once the file exists, the `size`, `page_size` and `prealloc` settings
|
||||||
|
# will have no more effect.
|
||||||
|
#file:
|
||||||
|
# Location of spool file. The default value is ${path.data}/spool.dat.
|
||||||
|
#path: "${path.data}/spool.dat"
|
||||||
|
|
||||||
|
# Configure file permissions if file is created. The default value is 0600.
|
||||||
|
#permissions: 0600
|
||||||
|
|
||||||
|
# File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB.
|
||||||
|
#size: 100MiB
|
||||||
|
|
||||||
|
# The files page size. A file is split into multiple pages of the same size. The default value is 4KiB.
|
||||||
|
#page_size: 4KiB
|
||||||
|
|
||||||
|
# If prealloc is set, the required space for the file is reserved using
|
||||||
|
# truncate. The default value is true.
|
||||||
|
#prealloc: true
|
||||||
|
|
||||||
|
# Spool writer settings
|
||||||
|
# Events are serialized into a write buffer. The write buffer is flushed if:
|
||||||
|
# - The buffer limit has been reached.
|
||||||
|
# - The configured limit of buffered events is reached.
|
||||||
|
# - The flush timeout is triggered.
|
||||||
|
#write:
|
||||||
|
# Sets the write buffer size.
|
||||||
|
#buffer_size: 1MiB
|
||||||
|
|
||||||
|
# Maximum duration after which events are flushed, if the write buffer
|
||||||
|
# is not full yet. The default value is 1s.
|
||||||
|
#flush.timeout: 1s
|
||||||
|
|
||||||
|
# Number of maximum buffered events. The write buffer is flushed once the
|
||||||
|
# limit is reached.
|
||||||
|
#flush.events: 16384
|
||||||
|
|
||||||
|
# Configure the on-disk event encoding. The encoding can be changed
|
||||||
|
# between restarts.
|
||||||
|
# Valid encodings are: json, ubjson, and cbor.
|
||||||
|
#codec: cbor
|
||||||
|
#read:
|
||||||
|
# Reader flush timeout, waiting for more events to become available, so
|
||||||
|
# to fill a complete batch, as required by the outputs.
|
||||||
|
# If flush_timeout is 0, all available events are forwarded to the
|
||||||
|
# outputs immediately.
|
||||||
|
# The default value is 0s.
|
||||||
|
#flush.timeout: 0s
|
||||||
|
|
||||||
# Sets the maximum number of CPUs that can be executing simultaneously. The
|
# Sets the maximum number of CPUs that can be executing simultaneously. The
|
||||||
# default is the number of logical CPUs available in the system.
|
# default is the number of logical CPUs available in the system.
|
||||||
#max_procs:
|
#max_procs:
|
||||||
@ -181,6 +242,14 @@ auditbeat.modules:
|
|||||||
# equals:
|
# equals:
|
||||||
# http.code: 200
|
# http.code: 200
|
||||||
#
|
#
|
||||||
|
# The following example renames the field a to b:
|
||||||
|
#
|
||||||
|
#processors:
|
||||||
|
#- rename:
|
||||||
|
# fields:
|
||||||
|
# - from: "a"
|
||||||
|
# to: "b"
|
||||||
|
#
|
||||||
# The following example enriches each event with metadata from the cloud
|
# The following example enriches each event with metadata from the cloud
|
||||||
# provider about the host machine. It works on EC2, GCE, DigitalOcean,
|
# provider about the host machine. It works on EC2, GCE, DigitalOcean,
|
||||||
# Tencent Cloud, and Alibaba Cloud.
|
# Tencent Cloud, and Alibaba Cloud.
|
||||||
@ -205,6 +274,7 @@ auditbeat.modules:
|
|||||||
# match_pids: ["process.pid", "process.ppid"]
|
# match_pids: ["process.pid", "process.ppid"]
|
||||||
# match_source: true
|
# match_source: true
|
||||||
# match_source_index: 4
|
# match_source_index: 4
|
||||||
|
# match_short_id: false
|
||||||
# cleanup_timeout: 60
|
# cleanup_timeout: 60
|
||||||
# # To connect to Docker over TLS you must specify a client and CA certificate.
|
# # To connect to Docker over TLS you must specify a client and CA certificate.
|
||||||
# #ssl:
|
# #ssl:
|
||||||
@ -218,6 +288,7 @@ auditbeat.modules:
|
|||||||
#
|
#
|
||||||
#processors:
|
#processors:
|
||||||
#- add_docker_metadata: ~
|
#- add_docker_metadata: ~
|
||||||
|
#- add_host_metadata: ~
|
||||||
|
|
||||||
#============================= Elastic Cloud ==================================
|
#============================= Elastic Cloud ==================================
|
||||||
|
|
||||||
@ -290,7 +361,18 @@ output.elasticsearch:
|
|||||||
# The default is 50.
|
# The default is 50.
|
||||||
#bulk_max_size: 50
|
#bulk_max_size: 50
|
||||||
|
|
||||||
# Configure http request timeout before failing an request to Elasticsearch.
|
# The number of seconds to wait before trying to reconnect to Elasticsearch
|
||||||
|
# after a network error. After waiting backoff.init seconds, the Beat
|
||||||
|
# tries to reconnect. If the attempt fails, the backoff timer is increased
|
||||||
|
# exponentially up to backoff.max. After a successful connection, the backoff
|
||||||
|
# timer is reset. The default is 1s.
|
||||||
|
#backoff.init: 1s
|
||||||
|
|
||||||
|
# The maximum number of seconds to wait before attempting to connect to
|
||||||
|
# Elasticsearch after a network error. The default is 60s.
|
||||||
|
#backoff.max: 60s
|
||||||
|
|
||||||
|
# Configure http request timeout before failing a request to Elasticsearch.
|
||||||
#timeout: 90
|
#timeout: 90
|
||||||
|
|
||||||
# Use SSL settings for HTTPS.
|
# Use SSL settings for HTTPS.
|
||||||
@ -354,7 +436,7 @@ output.elasticsearch:
 # Optional load balance the events between the Logstash hosts. Default is false.
 #loadbalance: false
 
-# Number of batches to be sent asynchronously to logstash while processing
+# Number of batches to be sent asynchronously to Logstash while processing
 # new batches.
 #pipelining: 2
 
@ -363,6 +445,17 @@ output.elasticsearch:
 # if no error is encountered.
 #slow_start: false
 
+# The number of seconds to wait before trying to reconnect to Logstash
+# after a network error. After waiting backoff.init seconds, the Beat
+# tries to reconnect. If the attempt fails, the backoff timer is increased
+# exponentially up to backoff.max. After a successful connection, the backoff
+# timer is reset. The default is 1s.
+#backoff.init: 1s
+
+# The maximum number of seconds to wait before attempting to connect to
+# Logstash after a network error. The default is 60s.
+#backoff.max: 60s
+
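
The Logstash output gains the same backoff pair. Combined with the options above, a minimal sketch (the host and port are placeholders):

    output.logstash:
      hosts: ["localhost:5044"]
      loadbalance: false
      pipelining: 2
      slow_start: false
      backoff.init: 1s
      backoff.max: 60s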
 # Optional index name. The default index name is set to auditbeat
 # in all lowercase.
 #index: 'auditbeat'
@ -707,6 +800,10 @@ output.elasticsearch:
 # the default for the logs path is a logs subdirectory inside the home path.
 #path.logs: ${path.home}/logs
 
+#================================ Keystore ==========================================
+# Location of the Keystore containing the keys and their sensitive values.
+#keystore.path: "${path.config}/beats.keystore"
+
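
The keystore keeps secrets out of the plain-text config: once a key has been stored (the keystore is managed with the Beat's keystore subcommand), any setting can reference it with ${KEY} syntax. A minimal sketch, where the key name ES_PWD is a hypothetical example:

    keystore.path: "${path.config}/beats.keystore"
    output.elasticsearch:
      password: "${ES_PWD}"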
 #============================== Dashboards =====================================
 # These settings control loading the sample dashboards to the Kibana index. Loading
 # the dashboards are disabled by default and can be enabled either by setting the
@ -741,6 +838,17 @@ output.elasticsearch:
 # how to install the dashboards by first querying Elasticsearch.
 #setup.dashboards.always_kibana: false
 
+# If true and Kibana is not reachable at the time when dashboards are loaded,
+# it will retry to reconnect to Kibana instead of exiting with an error.
+#setup.dashboards.retry.enabled: false
+
+# Duration interval between Kibana connection retries.
+#setup.dashboards.retry.interval: 1s
+
+# Maximum number of retries before exiting with an error, 0 for unlimited retrying.
+#setup.dashboards.retry.maximum: 0
+
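
A minimal sketch of the new retry knobs for a setup that should poll Kibana every five seconds, at most ten times (the values are illustrative, not the defaults):

    setup.dashboards.retry.enabled: true
    setup.dashboards.retry.interval: 5s
    setup.dashboards.retry.maximum: 10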
 
 #============================== Template =====================================
 
 # A template is used to set the mapping in Elasticsearch
33 vendor/github.com/elastic/beats/auditbeat/docker-compose.yml generated vendored Normal file
@ -0,0 +1,33 @@
+version: '2.1'
+services:
+  beat:
+    build: ${PWD}/.
+    depends_on:
+      - proxy_dep
+    env_file:
+      - ${PWD}/build/test.env
+    working_dir: /go/src/github.com/elastic/beats/auditbeat
+    environment:
+      - KIBANA_HOST=kibana
+      - KIBANA_PORT=5601
+    volumes:
+      - ${PWD}/..:/go/src/github.com/elastic/beats/
+    command: make
+
+  # This is a proxy used to block beats until all services are healthy.
+  # See: https://github.com/docker/compose/issues/4369
+  proxy_dep:
+    image: busybox
+    depends_on:
+      elasticsearch: { condition: service_healthy }
+      kibana: { condition: service_healthy }
+
+  elasticsearch:
+    extends:
+      file: ../testing/environments/${TESTING_ENVIRONMENT}.yml
+      service: elasticsearch
+
+  kibana:
+    extends:
+      file: ../testing/environments/${TESTING_ENVIRONMENT}.yml
+      service: kibana
1 vendor/github.com/elastic/beats/auditbeat/docs/configuring-howto.asciidoc generated vendored
@ -77,4 +77,3 @@ include::../../libbeat/docs/yaml.asciidoc[]
 include::../../libbeat/docs/regexp.asciidoc[]
 
 include::../../libbeat/docs/reference-yml.asciidoc[]
-
2147 vendor/github.com/elastic/beats/auditbeat/docs/fields.asciidoc generated vendored
File diff suppressed because it is too large
18 vendor/github.com/elastic/beats/auditbeat/docs/getting-started.asciidoc generated vendored
@ -1,18 +1,7 @@
 [id="{beatname_lc}-getting-started"]
 == Getting started with {beatname_uc}
 
-To get started with your own {beatname_uc} setup, install and configure these
-related products:
-
-* Elasticsearch for storage and indexing the data.
-* Kibana for the UI.
-* Logstash (optional) for inserting data into Elasticsearch.
-
-See {libbeat}/getting-started.html[Getting Started with Beats and the Elastic Stack]
-for more information.
-
-After installing the Elastic Stack, read the following topics to learn how to
-install, configure, and run {beatname_uc}:
+include::../../libbeat/docs/shared-getting-started-intro.asciidoc[]
 
 * <<{beatname_lc}-installation>>
 * <<{beatname_lc}-configuration>>
@ -25,7 +14,7 @@ install, configure, and run {beatname_uc}:
 [id="{beatname_lc}-installation"]
 === Step 1: Install {beatname_uc}
 
-You should install {beatname_uc} on all the servers you want to monitor.
+Install {beatname_uc} on all the servers you want to monitor.
 
 include::../../libbeat/docs/shared-download-and-install.asciidoc[]
 
@ -123,8 +112,7 @@ https://www.elastic.co/downloads/beats/{beatname_lc}[downloads page].
 . Rename the +{beatname_lc}-<version>-windows+ directory to +{beatname_uc}+.
 
 . Open a PowerShell prompt as an Administrator (right-click the PowerShell icon
-and select *Run As Administrator*). If you are running Windows XP, you may need
-to download and install PowerShell.
+and select *Run As Administrator*).
 
 . From the PowerShell prompt, run the following commands to install {beatname_uc}
 as a Windows service:
11 vendor/github.com/elastic/beats/auditbeat/docs/index.asciidoc generated vendored
@ -2,19 +2,21 @@
 
 include::../../libbeat/docs/version.asciidoc[]
 
-include::{asciidoc-dir}/../../shared/attributes62.asciidoc[]
+include::{asciidoc-dir}/../../shared/attributes.asciidoc[]
 
 :version: {stack-version}
 :beatname_lc: auditbeat
 :beatname_uc: Auditbeat
 :beatname_pkg: {beatname_lc}
+:github_repo_name: beats
+:discuss_forum: beats/{beatname_lc}
+:beat_default_index_prefix: {beatname_lc}
+:has_ml_jobs: yes
 
 include::../../libbeat/docs/shared-beats-attributes.asciidoc[]
 
 include::./overview.asciidoc[]
 
-include::../../libbeat/docs/contributing-to-beats.asciidoc[]
-
 include::./getting-started.asciidoc[]
 
 include::../../libbeat/docs/repositories.asciidoc[]
@ -38,3 +40,6 @@ include::../../libbeat/docs/security/securing-beats.asciidoc[]
 include::./troubleshooting.asciidoc[]
 
 include::./faq.asciidoc[]
+
+include::../../libbeat/docs/contributing-to-beats.asciidoc[]
+
1 vendor/github.com/elastic/beats/auditbeat/docs/securing-auditbeat.asciidoc generated vendored
@ -9,6 +9,7 @@ and other products in the Elastic stack:
 
 * <<securing-communication-elasticsearch>>
 * <<configuring-ssl-logstash>>
+* <<securing-beats>>
 
 //sets block macro for https.asciidoc included in next section
 
4 vendor/github.com/elastic/beats/auditbeat/docs/setting-up-running.asciidoc generated vendored
@ -4,7 +4,7 @@
 // that is unique to each beat.
 /////
 
-[[seting-up-and-running]]
+[[setting-up-and-running]]
 == Setting up and running {beatname_uc}
 
 Before reading this section, see the
@ -30,3 +30,5 @@ include::../../libbeat/docs/keystore.asciidoc[]
 include::../../libbeat/docs/command-reference.asciidoc[]
 
 include::./running-on-docker.asciidoc[]
+
+include::../../libbeat/docs/shared-shutdown.asciidoc[]
10 vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/config.yml.tpl generated vendored
@ -17,17 +17,19 @@
 ## Create file watches (-w) or syscall audits (-a or -A). Uncomment these
 ## examples or add your own rules.
 
+{{ if eq .goarch "amd64" -}}
 ## If you are on a 64 bit platform, everything should be running
 ## in 64 bit mode. This rule will detect any use of the 32 bit syscalls
 ## because this might be a sign of someone exploiting a hole in the 32
 ## bit API.
 #-a always,exit -F arch=b32 -S all -F key=32bit-abi
 
+{{ end -}}
 ## Executions.
-#-a always,exit -F arch=b64 -S execve,execveat -k exec
+#-a always,exit -F arch=b{{.arch_bits}} -S execve,execveat -k exec
 
 ## External access (warning: these can be expensive to audit).
-#-a always,exit -F arch=b64 -S accept,bind,connect -F key=external-access
+#-a always,exit -F arch=b{{.arch_bits}} -S accept,bind,connect -F key=external-access
 
 ## Identity changes.
 #-w /etc/group -p wa -k identity
@ -35,6 +37,6 @@
 #-w /etc/gshadow -p wa -k identity
 
 ## Unauthorized access attempts.
-#-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access
-#-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access
+#-a always,exit -F arch=b{{.arch_bits}} -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access
+#-a always,exit -F arch=b{{.arch_bits}} -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access
 {{ end -}}
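
For reference, rendering this template with -arch i386 (which the updated generator later in this commit maps to arch_bits=32) would emit the 32-bit form of these rules, e.g.:

    #-a always,exit -F arch=b32 -S execve,execveat -k exec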
@ -84,7 +84,7 @@
 "searchSourceJSON": "{\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[],\"highlightAll\":true,\"version\":true}"
 },
 "optionsJSON": "{\"darkTheme\":false,\"useMargins\":false}",
-"panelsJSON": "[{\"gridData\":{\"h\":3,\"i\":\"1\",\"w\":4,\"x\":4,\"y\":0},\"id\":\"20a8e8d0-c1c8-11e7-8995-936807a28b16\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":3,\"i\":\"3\",\"w\":4,\"x\":8,\"y\":0},\"id\":\"f81a6de0-c1c1-11e7-8995-936807a28b16\",\"panelIndex\":\"3\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":3,\"i\":\"5\",\"w\":4,\"x\":0,\"y\":0},\"id\":\"2efac370-c1ca-11e7-8995-936807a28b16\",\"panelIndex\":\"5\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":5,\"i\":\"6\",\"w\":12,\"x\":0,\"y\":3},\"id\":\"d382f5b0-c1c6-11e7-8995-936807a28b16\",\"panelIndex\":\"6\",\"type\":\"search\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"}]",
+"panelsJSON": "[{\"gridData\":{\"h\":3,\"i\":\"1\",\"w\":4,\"x\":4,\"y\":0},\"id\":\"20a8e8d0-c1c8-11e7-8995-936807a28b16\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"3\",\"w\":4,\"x\":8,\"y\":0},\"id\":\"f81a6de0-c1c1-11e7-8995-936807a28b16\",\"panelIndex\":\"3\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"5\",\"w\":4,\"x\":0,\"y\":0},\"id\":\"2efac370-c1ca-11e7-8995-936807a28b16\",\"panelIndex\":\"5\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":5,\"i\":\"6\",\"w\":12,\"x\":0,\"y\":3},\"id\":\"d382f5b0-c1c6-11e7-8995-936807a28b16\",\"panelIndex\":\"6\",\"type\":\"search\",\"version\":\"6.2.4\"}]",
 "timeRestore": false,
 "title": "[Auditbeat Auditd] Executions",
 "version": 1
@ -95,5 +95,5 @@
 "version": 5
 }
 ],
-"version": "7.0.0-alpha1-SNAPSHOT"
+"version": "6.2.4"
 }
@ -71,7 +71,7 @@
 "searchSourceJSON": "{\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[],\"highlightAll\":true,\"version\":true}"
 },
 "optionsJSON": "{\"darkTheme\":false,\"useMargins\":false}",
-"panelsJSON": "[{\"gridData\":{\"h\":3,\"i\":\"1\",\"w\":7,\"x\":0,\"y\":0},\"id\":\"97680df0-c1c0-11e7-8995-936807a28b16\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":3,\"i\":\"4\",\"w\":5,\"x\":7,\"y\":0},\"id\":\"08679220-c25a-11e7-8692-232bd1143e8a\",\"panelIndex\":\"4\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":5,\"i\":\"5\",\"w\":12,\"x\":0,\"y\":3},\"id\":\"0f10c430-c1c3-11e7-8995-936807a28b16\",\"panelIndex\":\"5\",\"type\":\"search\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"}]",
+"panelsJSON": "[{\"gridData\":{\"h\":3,\"i\":\"1\",\"w\":7,\"x\":0,\"y\":0},\"id\":\"97680df0-c1c0-11e7-8995-936807a28b16\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"4\",\"w\":5,\"x\":7,\"y\":0},\"id\":\"08679220-c25a-11e7-8692-232bd1143e8a\",\"panelIndex\":\"4\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":5,\"i\":\"5\",\"w\":12,\"x\":0,\"y\":3},\"id\":\"0f10c430-c1c3-11e7-8995-936807a28b16\",\"panelIndex\":\"5\",\"type\":\"search\",\"version\":\"6.2.4\"}]",
 "timeRestore": false,
 "title": "[Auditbeat Auditd] Overview",
 "version": 1
@ -82,5 +82,5 @@
 "version": 5
 }
 ],
-"version": "7.0.0-alpha1-SNAPSHOT"
+"version": "6.2.4"
 }
@ -173,7 +173,7 @@
 "searchSourceJSON": "{\"query\":{\"language\":\"lucene\",\"query\":\"*\"},\"filter\":[],\"highlightAll\":true,\"version\":true}"
 },
 "optionsJSON": "{\"darkTheme\":false,\"useMargins\":false}",
-"panelsJSON": "[{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"gridData\":{\"h\":4,\"i\":\"1\",\"w\":6,\"x\":6,\"y\":3},\"id\":\"faf882f0-c242-11e7-8692-232bd1143e8a\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"gridData\":{\"h\":5,\"i\":\"2\",\"w\":6,\"x\":0,\"y\":7},\"id\":\"ea483730-c246-11e7-8692-232bd1143e8a\",\"panelIndex\":\"2\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"gridData\":{\"h\":5,\"i\":\"3\",\"w\":6,\"x\":6,\"y\":7},\"id\":\"ceb91de0-c250-11e7-8692-232bd1143e8a\",\"panelIndex\":\"3\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":3,\"i\":\"4\",\"w\":12,\"x\":0,\"y\":0},\"id\":\"b21e0c70-c252-11e7-8692-232bd1143e8a\",\"panelIndex\":\"4\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":4,\"i\":\"5\",\"w\":6,\"x\":0,\"y\":3},\"id\":\"a8e20450-c256-11e7-8692-232bd1143e8a\",\"panelIndex\":\"5\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"}]",
+"panelsJSON": "[{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"gridData\":{\"h\":4,\"i\":\"1\",\"w\":6,\"x\":6,\"y\":3},\"id\":\"faf882f0-c242-11e7-8692-232bd1143e8a\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"gridData\":{\"h\":5,\"i\":\"2\",\"w\":6,\"x\":0,\"y\":7},\"id\":\"ea483730-c246-11e7-8692-232bd1143e8a\",\"panelIndex\":\"2\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"gridData\":{\"h\":5,\"i\":\"3\",\"w\":6,\"x\":6,\"y\":7},\"id\":\"ceb91de0-c250-11e7-8692-232bd1143e8a\",\"panelIndex\":\"3\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"4\",\"w\":12,\"x\":0,\"y\":0},\"id\":\"b21e0c70-c252-11e7-8692-232bd1143e8a\",\"panelIndex\":\"4\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":4,\"i\":\"5\",\"w\":6,\"x\":0,\"y\":3},\"id\":\"a8e20450-c256-11e7-8692-232bd1143e8a\",\"panelIndex\":\"5\",\"type\":\"visualization\",\"version\":\"6.2.4\"}]",
 "timeRestore": false,
 "title": "[Auditbeat Auditd] Sockets",
 "version": 1
@ -184,5 +184,5 @@
 "version": 4
 }
 ],
-"version": "7.0.0-alpha1-SNAPSHOT"
+"version": "6.2.4"
 }
3 vendor/github.com/elastic/beats/auditbeat/module/auditd/audit_linux_test.go generated vendored
@ -12,12 +12,13 @@ import (
 
 	"github.com/stretchr/testify/assert"
 
+	"github.com/prometheus/procfs"
+
 	"github.com/elastic/beats/auditbeat/core"
 	"github.com/elastic/beats/libbeat/logp"
 	mbtest "github.com/elastic/beats/metricbeat/mb/testing"
 	"github.com/elastic/go-libaudit"
 	"github.com/elastic/go-libaudit/auparse"
-	"github.com/elastic/procfs"
 )
 
 // Specify the -audit flag when running these tests to interact with the real
6 vendor/github.com/elastic/beats/auditbeat/module/auditd/config_linux_test.go generated vendored
@ -14,7 +14,7 @@ audit_rules: |
 # Comments and empty lines are ignored.
 -w /etc/passwd -p wa -k auth
 
--a always,exit -F arch=b64 -S execve -k exec`
+-a always,exit -S execve -k exec`
 
 	config, err := parseConfig(t, data)
 	if err != nil {
@ -26,7 +26,7 @@ audit_rules: |
 	}
 	assert.EqualValues(t, []string{
 		"-w /etc/passwd -p wa -k auth",
-		"-a always,exit -F arch=b64 -S execve -k exec",
+		"-a always,exit -S execve -k exec",
 	}, commands(rules))
 }
 
@ -35,7 +35,7 @@ func TestConfigValidateWithError(t *testing.T) {
 audit_rules: |
 -x bad -F flag
 -a always,exit -w /etc/passwd
--a always,exit -F arch=b64 -S fake -k exec`
+-a always,exit -S fake -k exec`
 
 	_, err := parseConfig(t, data)
 	if err == nil {
@ -218,7 +218,7 @@
 "searchSourceJSON": "{\"filter\":[],\"highlightAll\":true,\"version\":true,\"query\":{\"language\":\"lucene\",\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"default_field\":\"*\",\"query\":\"*\"}}}}"
 },
 "optionsJSON": "{\"darkTheme\":false,\"useMargins\":false}",
-"panelsJSON": "[{\"gridData\":{\"h\":6,\"i\":\"1\",\"w\":2,\"x\":0,\"y\":0},\"id\":\"AV0tVcg6g1PYniApZa-v\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":6,\"i\":\"2\",\"w\":7,\"x\":2,\"y\":0},\"id\":\"AV0tV05vg1PYniApZbA2\",\"panelIndex\":\"2\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":3,\"i\":\"3\",\"w\":3,\"x\":9,\"y\":0},\"id\":\"AV0tWL-Yg1PYniApZbCs\",\"panelIndex\":\"3\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":3,\"i\":\"4\",\"w\":3,\"x\":9,\"y\":3},\"id\":\"AV0tWSdXg1PYniApZbDU\",\"panelIndex\":\"4\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":3,\"i\":\"5\",\"w\":4,\"x\":4,\"y\":8},\"id\":\"AV0tW0djg1PYniApZbGL\",\"panelIndex\":\"5\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":2,\"i\":\"6\",\"w\":4,\"x\":0,\"y\":6},\"id\":\"AV0tY6jwg1PYniApZbRY\",\"panelIndex\":\"6\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":2,\"i\":\"7\",\"w\":4,\"x\":4,\"y\":6},\"id\":\"AV0tav8Ag1PYniApZbbK\",\"panelIndex\":\"7\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":2,\"i\":\"8\",\"w\":4,\"x\":8,\"y\":6},\"id\":\"AV0tbcUdg1PYniApZbe1\",\"panelIndex\":\"8\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":5,\"i\":\"9\",\"w\":6,\"x\":0,\"y\":11},\"id\":\"AV0tc_xZg1PYniApZbnL\",\"panelIndex\":\"9\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":3,\"i\":\"10\",\"w\":4,\"x\":8,\"y\":8},\"id\":\"AV0tes4Eg1PYniApZbwV\",\"panelIndex\":\"10\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":3,\"i\":\"11\",\"w\":4,\"x\":0,\"y\":8},\"id\":\"AV0te0TCg1PYniApZbw9\",\"panelIndex\":\"11\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"columns\":[\"file.path\",\"event.action\"],\"gridData\":{\"h\":5,\"i\":\"12\",\"w\":6,\"x\":6,\"y\":11},\"id\":\"a380a060-cb44-11e7-9835-2f31fe08873b\",\"panelIndex\":\"12\",\"sort\":[\"@timestamp\",\"desc\"],\"type\":\"search\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"}]",
+"panelsJSON": "[{\"gridData\":{\"h\":6,\"i\":\"1\",\"w\":2,\"x\":0,\"y\":0},\"id\":\"AV0tVcg6g1PYniApZa-v\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":6,\"i\":\"2\",\"w\":7,\"x\":2,\"y\":0},\"id\":\"AV0tV05vg1PYniApZbA2\",\"panelIndex\":\"2\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"3\",\"w\":3,\"x\":9,\"y\":0},\"id\":\"AV0tWL-Yg1PYniApZbCs\",\"panelIndex\":\"3\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"4\",\"w\":3,\"x\":9,\"y\":3},\"id\":\"AV0tWSdXg1PYniApZbDU\",\"panelIndex\":\"4\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"5\",\"w\":4,\"x\":4,\"y\":8},\"id\":\"AV0tW0djg1PYniApZbGL\",\"panelIndex\":\"5\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":2,\"i\":\"6\",\"w\":4,\"x\":0,\"y\":6},\"id\":\"AV0tY6jwg1PYniApZbRY\",\"panelIndex\":\"6\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":2,\"i\":\"7\",\"w\":4,\"x\":4,\"y\":6},\"id\":\"AV0tav8Ag1PYniApZbbK\",\"panelIndex\":\"7\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":2,\"i\":\"8\",\"w\":4,\"x\":8,\"y\":6},\"id\":\"AV0tbcUdg1PYniApZbe1\",\"panelIndex\":\"8\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":5,\"i\":\"9\",\"w\":6,\"x\":0,\"y\":11},\"id\":\"AV0tc_xZg1PYniApZbnL\",\"panelIndex\":\"9\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"10\",\"w\":4,\"x\":8,\"y\":8},\"id\":\"AV0tes4Eg1PYniApZbwV\",\"panelIndex\":\"10\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"11\",\"w\":4,\"x\":0,\"y\":8},\"id\":\"AV0te0TCg1PYniApZbw9\",\"panelIndex\":\"11\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"columns\":[\"file.path\",\"event.action\"],\"gridData\":{\"h\":5,\"i\":\"12\",\"w\":6,\"x\":6,\"y\":11},\"id\":\"a380a060-cb44-11e7-9835-2f31fe08873b\",\"panelIndex\":\"12\",\"sort\":[\"@timestamp\",\"desc\"],\"type\":\"search\",\"version\":\"6.2.4\"}]",
 "timeRestore": false,
 "title": "[Auditbeat File Integrity] Overview",
 "version": 1
@ -230,4 +230,4 @@
 }
 ],
 "version": "6.1.2"
 }
12 vendor/github.com/elastic/beats/auditbeat/module/file_integrity/eventreader_fsnotify.go generated vendored
@ -36,6 +36,14 @@ func NewEventReader(c Config) (EventProducer, error) {
 }
 
 func (r *reader) Start(done <-chan struct{}) (<-chan Event, error) {
+	if err := r.watcher.Start(); err != nil {
+		return nil, errors.Wrap(err, "unable to start watcher")
+	}
+	go r.consumeEvents(done)
+
+	// Windows implementation of fsnotify needs to have the watched paths
+	// installed after the event consumer is started, to avoid a potential
+	// deadlock. Do it on all platforms for simplicity.
 	for _, p := range r.config.Paths {
 		if err := r.watcher.Add(p); err != nil {
 			if err == syscall.EMFILE {
@ -48,10 +56,6 @@ func (r *reader) Start(done <-chan struct{}) (<-chan Event, error) {
 		}
 	}
 
-	if err := r.watcher.Start(); err != nil {
-		return nil, errors.Wrap(err, "unable to start watcher")
-	}
-	go r.consumeEvents(done)
 	r.log.Infow("Started fsnotify watcher",
 		"file_path", r.config.Paths,
 		"recursive", r.config.Recursive)
@ -1,3 +1,5 @@
+// +build !integration
+
 package monitor
 
 import (
4 vendor/github.com/elastic/beats/auditbeat/module/file_integrity/monitor/monitor.go generated vendored
@ -21,7 +21,9 @@ func New(recursive bool) (Watcher, error) {
 	if err != nil {
 		return nil, err
 	}
-	if recursive {
+	// Use our simulated recursive watches unless the fsnotify implementation
+	// supports OS-provided recursive watches
+	if recursive && fsnotify.SetRecursive() != nil {
 		return newRecursiveWatcher(fsnotify), nil
 	}
 	return (*nonRecursiveWatcher)(fsnotify), nil
@ -1,3 +1,5 @@
+// +build !integration
+
 package monitor
 
 import (
vendor/github.com/elastic/beats/auditbeat/scripts/generate_config.go
generated
vendored
12
vendor/github.com/elastic/beats/auditbeat/scripts/generate_config.go
generated
vendored
@ -18,6 +18,7 @@ const defaultGlob = "module/*/_meta/config*.yml.tpl"
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
goos = flag.String("os", runtime.GOOS, "generate config specific to the specified operating system")
|
goos = flag.String("os", runtime.GOOS, "generate config specific to the specified operating system")
|
||||||
|
goarch = flag.String("arch", runtime.GOARCH, "generate config specific to the specified CPU architecture")
|
||||||
reference = flag.Bool("ref", false, "generate a reference config")
|
reference = flag.Bool("ref", false, "generate a reference config")
|
||||||
concat = flag.Bool("concat", false, "concatenate all configs instead writing individual files")
|
concat = flag.Bool("concat", false, "concatenate all configs instead writing individual files")
|
||||||
)
|
)
|
||||||
@ -40,9 +41,20 @@ func getConfig(file string) ([]byte, error) {
|
|||||||
return nil, errors.Wrapf(err, "failed reading %v", file)
|
return nil, errors.Wrapf(err, "failed reading %v", file)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var archBits string
|
||||||
|
switch *goarch {
|
||||||
|
case "i386":
|
||||||
|
archBits = "32"
|
||||||
|
case "amd64":
|
||||||
|
archBits = "64"
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("supporting only i386 and amd64 architecture")
|
||||||
|
}
|
||||||
data := map[string]interface{}{
|
data := map[string]interface{}{
|
||||||
|
"goarch": *goarch,
|
||||||
"goos": *goos,
|
"goos": *goos,
|
||||||
"reference": *reference,
|
"reference": *reference,
|
||||||
|
"arch_bits": archBits,
|
||||||
}
|
}
|
||||||
buf := new(bytes.Buffer)
|
buf := new(bytes.Buffer)
|
||||||
if err = tpl.Execute(buf, data); err != nil {
|
if err = tpl.Execute(buf, data); err != nil {
|
||||||
48 vendor/github.com/elastic/beats/auditbeat/tests/system/auditbeat.py generated vendored
@ -1,14 +1,54 @@
 import os
+import shutil
 import sys
+import tempfile
+
-sys.path.append('../../../libbeat/tests/system')
-from beat.beat import TestCase
+sys.path.append(os.path.join(os.path.dirname(__file__), '../../../metricbeat/tests/system'))
+
+if os.name == "nt":
+    import win32file
+
+from metricbeat import BaseTest as MetricbeatTest
+
+
-class BaseTest(TestCase):
+class BaseTest(MetricbeatTest):
     @classmethod
     def setUpClass(self):
         self.beat_name = "auditbeat"
         self.beat_path = os.path.abspath(
             os.path.join(os.path.dirname(__file__), "../../"))
-        super(BaseTest, self).setUpClass()
+        super(MetricbeatTest, self).setUpClass()
+
+    def create_file(self, path, contents):
+        f = open(path, 'wb')
+        f.write(contents)
+        f.close()
+
+    def check_event(self, event, expected):
+        for key in expected:
+            assert key in event, "key '{0}' not found in event".format(key)
+            assert event[key] == expected[key], \
+                "key '{0}' has value '{1}', expected '{2}'".format(key,
+                                                                   event[key],
+                                                                   expected[key])
+
+    def temp_dir(self, prefix):
+        # os.path.realpath resolves any symlinks in path. Necessary for macOS
+        # where /var is a symlink to /private/var
+        p = os.path.realpath(tempfile.mkdtemp(prefix))
+        if os.name == "nt":
+            # Under windows, get rid of any ~1 in path (short path)
+            p = str(win32file.GetLongPathName(p))
+        return p
+
+
+class PathCleanup:
+    def __init__(self, paths):
+        self.paths = paths
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        for path in self.paths:
+            shutil.rmtree(path)
15 vendor/github.com/elastic/beats/auditbeat/tests/system/config/auditbeat.yml.j2 generated vendored
@ -18,15 +18,8 @@ auditbeat.modules:
 {%- endfor %}
 
 queue.mem:
-  events: 4096
-  flush.min_events: 8
-  flush.timeout: 0.1s
+  events: 4
+  flush.min_events: 0
+  flush.timeout: 0.01s
 
-{%- if elasticsearch %}
-output.elasticsearch:
-  hosts: ["{{ elasticsearch.host }}"]
-{%- else %}
-output.file:
-  path: '{{ output_file_path|default(beat.working_dir + "/output") }}'
-  filename: {{ output_file_filename|default("auditbeat") }}
-{%- endif %}
+{% include './tests/system/config/libbeat.yml.j2' %}
47 vendor/github.com/elastic/beats/auditbeat/tests/system/test_base.py generated vendored
@ -1,5 +1,7 @@
 import re
 import sys
+import os
+import shutil
 import unittest
 from auditbeat import BaseTest
 from elasticsearch import Elasticsearch
@ -7,15 +9,18 @@ from beat.beat import INTEGRATION_TESTS
 
 
 class Test(BaseTest):
-    @unittest.skipUnless(re.match("(?i)linux", sys.platform), "os")
     def test_start_stop(self):
         """
         Auditbeat starts and stops without error.
         """
-        self.render_config_template(modules=[{
-            "name": "audit",
-            "metricsets": ["kernel"],
-        }])
+        self.render_config_template(
+            modules=[{
+                "name": "file_integrity",
+                "extras": {
+                    "paths": ["file.example"],
+                }
+            }],
+        )
         proc = self.start_beat()
         self.wait_until(lambda: self.log_contains("start running"))
         proc.check_kill_and_wait()
@ -35,11 +40,10 @@ class Test(BaseTest):
 
         self.render_config_template(
             modules=[{
-                "name": "audit",
-                "metricsets": ["file"],
+                "name": "file_integrity",
                 "extras": {
-                    "file.paths": ["file.example"],
-                },
+                    "paths": ["file.example"],
+                }
             }],
             elasticsearch={"host": self.get_elasticsearch_url()})
         exit_code = self.run_beat(extra_args=["setup", "--template"])
@ -47,3 +51,28 @@ class Test(BaseTest):
         assert exit_code == 0
         assert self.log_contains('Loaded index template')
         assert len(es.cat.templates(name='auditbeat-*', h='name')) > 0
+
+    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
+    def test_dashboards(self):
+        """
+        Test that the dashboards can be loaded with `setup --dashboards`
+        """
+
+        kibana_dir = os.path.join(self.beat_path, "_meta", "kibana")
+        shutil.copytree(kibana_dir, os.path.join(self.working_dir, "kibana"))
+
+        es = Elasticsearch([self.get_elasticsearch_url()])
+        self.render_config_template(
+            modules=[{
+                "name": "file_integrity",
+                "extras": {
+                    "paths": ["file.example"],
+                }
+            }],
+            elasticsearch={"host": self.get_elasticsearch_url()},
+            kibana={"host": self.get_kibana_url()},
+        )
+        exit_code = self.run_beat(extra_args=["setup", "--dashboards"])
+
+        assert exit_code == 0
+        assert self.log_contains("Kibana dashboards successfully loaded.")
189 vendor/github.com/elastic/beats/auditbeat/tests/system/test_file_integrity.py generated vendored Normal file
@ -0,0 +1,189 @@
+import sys
+import os
+import shutil
+import time
+import unittest
+from auditbeat import *
+from beat.beat import INTEGRATION_TESTS
+
+
+# Escapes a path to match what's printed in the logs
+def escape_path(path):
+    return path.replace('\\', '\\\\')
+
+
+def has_file(objs, path, sha1hash):
+    found = False
+    for obj in objs:
+        if 'file.path' in obj and 'hash.sha1' in obj \
+                and obj['file.path'].lower() == path.lower() and obj['hash.sha1'] == sha1hash:
+            found = True
+            break
+    assert found, "File '{0}' with sha1sum '{1}' not found".format(path, sha1hash)
+
+
+def has_dir(objs, path):
+    found = False
+    for obj in objs:
+        if 'file.path' in obj and obj['file.path'].lower() == path.lower() and obj['file.type'] == "dir":
+            found = True
+            break
+    assert found, "Dir '{0}' not found".format(path)
+
+
+def file_events(objs, path, expected):
+    evts = set()
+    for obj in objs:
+        if 'file.path' in obj and 'event.action' in obj and obj['file.path'].lower() == path.lower():
+            if type(obj['event.action']) == list:
+                evts = evts.union(set(obj['event.action']))
+            else:
+                evts.add(obj['event.action'])
+    for wanted in set(expected):
+        assert wanted in evts, "Event {0} for path '{1}' not found (got {2})".format(
+            wanted, path, evts)
+
+
+def wrap_except(expr):
+    try:
+        return expr()
+    except IOError:
+        return False
+
+
+class Test(BaseTest):
+
+    def wait_output(self, min_events):
+        self.wait_until(lambda: wrap_except(lambda: len(self.read_output()) >= min_events))
+        # wait for the number of lines in the file to stay constant for a second
+        prev_lines = -1
+        while True:
+            num_lines = self.output_lines()
+            if prev_lines < num_lines:
+                prev_lines = num_lines
+                time.sleep(1)
+            else:
+                break
+
+    def test_non_recursive(self):
+        """
+        file_integrity monitors watched directories (non recursive).
+        """
+
+        dirs = [self.temp_dir("auditbeat_test"),
+                self.temp_dir("auditbeat_test")]
+
+        with PathCleanup(dirs):
+            self.render_config_template(
+                modules=[{
+                    "name": "file_integrity",
+                    "extras": {
+                        "paths": dirs,
+                        "scan_at_start": False
+                    }
+                }],
+            )
+            proc = self.start_beat()
+
+            # wait until the directories to watch are printed in the logs
+            # this happens when the file_integrity module starts.
+            # Case must be ignored under windows as capitalisation of paths
+            # may differ
+            self.wait_log_contains(escape_path(dirs[0]), max_timeout=30, ignore_case=True)
+
+            file1 = os.path.join(dirs[0], 'file.txt')
+            self.create_file(file1, "hello world!")
+
+            file2 = os.path.join(dirs[1], 'file2.txt')
+            self.create_file(file2, "Foo bar")
+
+            # wait until file1 is reported before deleting. Otherwise the hash
+            # might not be calculated
+            self.wait_log_contains("\"path\": \"{0}\"".format(escape_path(file1)), ignore_case=True)
+
+            os.unlink(file1)
+
+            subdir = os.path.join(dirs[0], "subdir")
+            os.mkdir(subdir)
+            file3 = os.path.join(subdir, "other_file.txt")
+            self.create_file(file3, "not reported.")
+
+            self.wait_log_contains("\"deleted\"")
+            self.wait_log_contains("\"path\": \"{0}\"".format(escape_path(subdir)), ignore_case=True)
+            self.wait_output(3)
+
+            proc.check_kill_and_wait()
+            self.assert_no_logged_warnings()
+
+            # Ensure all Beater stages are used.
+            assert self.log_contains("Setup Beat: auditbeat")
+            assert self.log_contains("auditbeat start running")
+            assert self.log_contains("auditbeat stopped")
+
+            objs = self.read_output()
+
+            has_file(objs, file1, "430ce34d020724ed75a196dfc2ad67c77772d169")
+            has_file(objs, file2, "d23be250530a24be33069572db67995f21244c51")
+            has_dir(objs, subdir)
+
+            file_events(objs, file1, ['created', 'deleted'])
+            file_events(objs, file2, ['created'])
+
+            # assert file inside subdir is not reported
+            assert self.log_contains(file3) is False
+
+    def test_recursive(self):
+        """
+        file_integrity monitors watched directories (recursive).
+        """
+
+        dirs = [self.temp_dir("auditbeat_test")]
+
+        with PathCleanup(dirs):
+            self.render_config_template(
+                modules=[{
+                    "name": "file_integrity",
+                    "extras": {
+                        "paths": dirs,
+                        "scan_at_start": False,
+                        "recursive": True
+                    }
+                }],
+            )
+            proc = self.start_beat()
+
+            # wait until the directories to watch are printed in the logs
+            # this happens when the file_integrity module starts
+            self.wait_log_contains(escape_path(dirs[0]), max_timeout=30, ignore_case=True)
+            self.wait_log_contains("\"recursive\": true")
+
+            subdir = os.path.join(dirs[0], "subdir")
+            os.mkdir(subdir)
+            file1 = os.path.join(subdir, "file.txt")
+            self.create_file(file1, "hello world!")
+
+            subdir2 = os.path.join(subdir, "other")
+            os.mkdir(subdir2)
+            file2 = os.path.join(subdir2, "more.txt")
+            self.create_file(file2, "")
+
+            self.wait_log_contains("\"path\": \"{0}\"".format(escape_path(file2)), ignore_case=True)
+            self.wait_output(4)
+
+            proc.check_kill_and_wait()
+            self.assert_no_logged_warnings()
+
+            # Ensure all Beater stages are used.
+            assert self.log_contains("Setup Beat: auditbeat")
+            assert self.log_contains("auditbeat start running")
+            assert self.log_contains("auditbeat stopped")
+
+            objs = self.read_output()
+
+            has_file(objs, file1, "430ce34d020724ed75a196dfc2ad67c77772d169")
+            has_file(objs, file2, "da39a3ee5e6b4b0d3255bfef95601890afd80709")
+            has_dir(objs, subdir)
+            has_dir(objs, subdir2)
+
+            file_events(objs, file1, ['created'])
+            file_events(objs, file2, ['created'])
62 vendor/github.com/elastic/beats/deploy/kubernetes/.travis/setup.sh generated vendored
@ -3,59 +3,13 @@
 
 set -x
 
-# set docker0 to promiscuous mode
-sudo ip link set docker0 promisc on
+export CHANGE_MINIKUBE_NONE_USER=true
 
-# install etcd
-wget https://github.com/coreos/etcd/releases/download/$TRAVIS_ETCD_VERSION/etcd-$TRAVIS_ETCD_VERSION-linux-amd64.tar.gz
-tar xzf etcd-$TRAVIS_ETCD_VERSION-linux-amd64.tar.gz
-sudo mv etcd-$TRAVIS_ETCD_VERSION-linux-amd64/etcd /usr/local/bin/etcd
-rm etcd-$TRAVIS_ETCD_VERSION-linux-amd64.tar.gz
-rm -rf etcd-$TRAVIS_ETCD_VERSION-linux-amd64
-
-# download kubectl
-wget https://storage.googleapis.com/kubernetes-release/release/$TRAVIS_KUBE_VERSION/bin/linux/amd64/kubectl
-chmod +x kubectl
-sudo mv kubectl /usr/local/bin/kubectl
-
-# download kubernetes
-git clone https://github.com/kubernetes/kubernetes $HOME/kubernetes
-
-# install cfssl
-go get -u github.com/cloudflare/cfssl/cmd/...
-
-pushd $HOME/kubernetes
-git checkout $TRAVIS_KUBE_VERSION
-kubectl config set-credentials myself --username=admin --password=admin
-kubectl config set-context local --cluster=local --user=myself
-kubectl config set-cluster local --server=http://localhost:8080
-kubectl config use-context local
-
-# start kubernetes in the background
-sudo PATH=$PATH:/home/travis/.gimme/versions/go1.7.linux.amd64/bin/go \
-  KUBE_ENABLE_CLUSTER_DNS=true \
-  hack/local-up-cluster.sh &
-popd
-
-# Wait until kube is up and running
-TIMEOUT=0
-TIMEOUT_COUNT=800
-until $(curl --output /dev/null --silent http://localhost:8080) || [ $TIMEOUT -eq $TIMEOUT_COUNT ]; do
-  echo "Kube is not up yet"
-  let TIMEOUT=TIMEOUT+1
-  sleep 1
-done
-
-if [ $TIMEOUT -eq $TIMEOUT_COUNT ]; then
-  echo "Kubernetes is not up and running"
-  exit 1
-fi
-
-echo "Kubernetes is deployed and reachable"
-
-# Try and sleep before issuing chown. Currently, Kubernetes is started by
-# a command that is run in the background. Technically Kubernetes could be
-# up and running, but those files might not exist yet as the previous command
-# could create them after Kube starts successfully.
-sleep 30
-sudo chown -R $USER:$USER $HOME/.kube
+curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/$TRAVIS_K8S_VERSION/bin/linux/amd64/kubectl && \
+chmod +x kubectl && sudo mv kubectl /usr/local/bin/
+curl -Lo minikube https://storage.googleapis.com/minikube/releases/$TRAVIS_MINIKUBE_VERSION/minikube-linux-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/
+sudo minikube start --vm-driver=none --kubernetes-version=$TRAVIS_K8S_VERSION --logtostderr
+minikube update-context
+JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'; \
+until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do sleep 1; done
27 vendor/github.com/elastic/beats/deploy/kubernetes/filebeat-kubernetes.yaml generated vendored
@ -6,14 +6,13 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: filebeat
-    kubernetes.io/cluster-service: "true"
 data:
   filebeat.yml: |-
     filebeat.config:
-      prospectors:
-        # Mounted `filebeat-prospectors` configmap:
-        path: ${path.config}/prospectors.d/*.yml
-        # Reload prospectors configs as they change:
+      inputs:
+        # Mounted `filebeat-inputs` configmap:
+        path: ${path.config}/inputs.d/*.yml
+        # Reload inputs configs as they change:
         reload.enabled: false
       modules:
         path: ${path.config}/modules.d/*.yml
@ -34,11 +33,10 @@ data:
 apiVersion: v1
 kind: ConfigMap
 metadata:
-  name: filebeat-prospectors
+  name: filebeat-inputs
   namespace: kube-system
   labels:
     k8s-app: filebeat
-    kubernetes.io/cluster-service: "true"
 data:
   kubernetes.yml: |-
     - type: docker
@ -55,19 +53,17 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: filebeat
-    kubernetes.io/cluster-service: "true"
 spec:
   template:
     metadata:
       labels:
         k8s-app: filebeat
-        kubernetes.io/cluster-service: "true"
     spec:
       serviceAccountName: filebeat
       terminationGracePeriodSeconds: 30
      containers:
       - name: filebeat
-        image: docker.elastic.co/beats/filebeat:6.2.3
+        image: docker.elastic.co/beats/filebeat:6.3.2
         args: [
           "-c", "/etc/filebeat.yml",
           "-e",
@ -98,8 +94,8 @@ spec:
           mountPath: /etc/filebeat.yml
           readOnly: true
           subPath: filebeat.yml
-        - name: prospectors
-          mountPath: /usr/share/filebeat/prospectors.d
+        - name: inputs
+          mountPath: /usr/share/filebeat/inputs.d
           readOnly: true
         - name: data
          mountPath: /usr/share/filebeat/data
@ -114,10 +110,13 @@ spec:
       - name: varlibdockercontainers
         hostPath:
           path: /var/lib/docker/containers
-      - name: prospectors
+      - name: inputs
        configMap:
           defaultMode: 0600
-          name: filebeat-prospectors
+          name: filebeat-inputs
+      # We set an `emptyDir` here to ensure the manifest will deploy correctly.
+      # It's recommended to change this to a `hostPath` folder, to ensure internal data
+      # files survive pod changes (ie: version upgrade)
       - name: data
         emptyDir: {}
 ---
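
With the rename from prospectors to inputs, each file in the inputs.d directory holds an input configuration like the `- type: docker` entry above. A minimal sketch of such an entry (the wildcard container ID and the add_kubernetes_metadata processor are illustrative assumptions, not part of this commit):

    - type: docker
      containers.ids:
        - "*"
      processors:
        - add_kubernetes_metadata:
            in_cluster: true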
2 vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/README.md generated vendored
@ -8,7 +8,7 @@ By deploying filebeat as a [DaemonSet](https://kubernetes.io/docs/concepts/workl
 we ensure we get a running filebeat daemon on each node of the cluster.
 
 Docker logs host folder (`/var/lib/docker/containers`) is mounted on the
-filebeat container. Filebeat will start a prospector for these files and start
+filebeat container. Filebeat will start an input for these files and start
 harvesting them as they appear.
 
 Everything is deployed under `kube-system` namespace, you can change that by
|
12 vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-configmap.yaml generated vendored
@@ -6,14 +6,13 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: filebeat
-    kubernetes.io/cluster-service: "true"
 data:
   filebeat.yml: |-
     filebeat.config:
-      prospectors:
-        # Mounted `filebeat-prospectors` configmap:
-        path: ${path.config}/prospectors.d/*.yml
-        # Reload prospectors configs as they change:
+      inputs:
+        # Mounted `filebeat-inputs` configmap:
+        path: ${path.config}/inputs.d/*.yml
+        # Reload inputs configs as they change:
         reload.enabled: false
       modules:
         path: ${path.config}/modules.d/*.yml
@@ -34,11 +33,10 @@ data:
 apiVersion: v1
 kind: ConfigMap
 metadata:
-  name: filebeat-prospectors
+  name: filebeat-inputs
   namespace: kube-system
   labels:
     k8s-app: filebeat
-    kubernetes.io/cluster-service: "true"
 data:
   kubernetes.yml: |-
     - type: docker
13 vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-daemonset.yaml generated vendored
@@ -5,13 +5,11 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: filebeat
-    kubernetes.io/cluster-service: "true"
 spec:
   template:
     metadata:
       labels:
         k8s-app: filebeat
-        kubernetes.io/cluster-service: "true"
     spec:
       serviceAccountName: filebeat
       terminationGracePeriodSeconds: 30
@@ -48,8 +46,8 @@ spec:
           mountPath: /etc/filebeat.yml
           readOnly: true
           subPath: filebeat.yml
-        - name: prospectors
-          mountPath: /usr/share/filebeat/prospectors.d
+        - name: inputs
+          mountPath: /usr/share/filebeat/inputs.d
           readOnly: true
         - name: data
           mountPath: /usr/share/filebeat/data
@@ -64,9 +62,12 @@ spec:
       - name: varlibdockercontainers
         hostPath:
           path: /var/lib/docker/containers
-      - name: prospectors
+      - name: inputs
        configMap:
           defaultMode: 0600
-          name: filebeat-prospectors
+          name: filebeat-inputs
+      # We set an `emptyDir` here to ensure the manifest will deploy correctly.
+      # It's recommended to change this to a `hostPath` folder, to ensure internal data
+      # files survive pod changes (ie: version upgrade)
       - name: data
         emptyDir: {}
16 vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat-kubernetes.yaml generated vendored
@@ -6,7 +6,6 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: metricbeat
-    kubernetes.io/cluster-service: "true"
 data:
   metricbeat.yml: |-
     metricbeat.config.modules:
@@ -33,7 +32,6 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: metricbeat
-    kubernetes.io/cluster-service: "true"
 data:
   system.yml: |-
     - module: system
@@ -80,20 +78,19 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: metricbeat
-    kubernetes.io/cluster-service: "true"
 spec:
   template:
     metadata:
       labels:
         k8s-app: metricbeat
-        kubernetes.io/cluster-service: "true"
     spec:
       serviceAccountName: metricbeat
       terminationGracePeriodSeconds: 30
       hostNetwork: true
+      dnsPolicy: ClusterFirstWithHostNet
       containers:
       - name: metricbeat
-        image: docker.elastic.co/beats/metricbeat:6.2.3
+        image: docker.elastic.co/beats/metricbeat:6.3.2
         args: [
           "-c", "/etc/metricbeat.yml",
           "-e",
@@ -158,6 +155,9 @@ spec:
         configMap:
           defaultMode: 0600
           name: metricbeat-daemonset-modules
+      # We set an `emptyDir` here to ensure the manifest will deploy correctly.
+      # It's recommended to change this to a `hostPath` folder, to ensure internal data
+      # files survive pod changes (ie: version upgrade)
       - name: data
         emptyDir: {}
 ---
@@ -168,7 +168,6 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: metricbeat
-    kubernetes.io/cluster-service: "true"
 data:
   # This module requires `kube-state-metrics` up and running under `kube-system` namespace
   kubernetes.yml: |-
@@ -192,17 +191,16 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: metricbeat
-    kubernetes.io/cluster-service: "true"
 spec:
   template:
     metadata:
       labels:
         k8s-app: metricbeat
-        kubernetes.io/cluster-service: "true"
     spec:
+      serviceAccountName: metricbeat
       containers:
       - name: metricbeat
-        image: docker.elastic.co/beats/metricbeat:6.2.3
+        image: docker.elastic.co/beats/metricbeat:6.3.2
         args: [
           "-c", "/etc/metricbeat.yml",
           "-e",
@@ -6,7 +6,6 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: metricbeat
-    kubernetes.io/cluster-service: "true"
 data:
   metricbeat.yml: |-
     metricbeat.config.modules:
@@ -33,7 +32,6 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: metricbeat
-    kubernetes.io/cluster-service: "true"
 data:
   system.yml: |-
     - module: system
@@ -6,17 +6,16 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: metricbeat
-    kubernetes.io/cluster-service: "true"
 spec:
   template:
     metadata:
       labels:
         k8s-app: metricbeat
-        kubernetes.io/cluster-service: "true"
     spec:
       serviceAccountName: metricbeat
       terminationGracePeriodSeconds: 30
       hostNetwork: true
+      dnsPolicy: ClusterFirstWithHostNet
       containers:
       - name: metricbeat
         image: docker.elastic.co/beats/metricbeat:%VERSION%
@@ -84,5 +83,8 @@ spec:
         configMap:
           defaultMode: 0600
           name: metricbeat-daemonset-modules
+      # We set an `emptyDir` here to ensure the manifest will deploy correctly.
+      # It's recommended to change this to a `hostPath` folder, to ensure internal data
+      # files survive pod changes (ie: version upgrade)
       - name: data
         emptyDir: {}
@@ -5,7 +5,6 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: metricbeat
-    kubernetes.io/cluster-service: "true"
 data:
   # This module requires `kube-state-metrics` up and running under `kube-system` namespace
   kubernetes.yml: |-
@@ -6,14 +6,13 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: metricbeat
-    kubernetes.io/cluster-service: "true"
 spec:
   template:
     metadata:
       labels:
         k8s-app: metricbeat
-        kubernetes.io/cluster-service: "true"
     spec:
+      serviceAccountName: metricbeat
       containers:
       - name: metricbeat
         image: docker.elastic.co/beats/metricbeat:%VERSION%
23 vendor/github.com/elastic/beats/dev-tools/cherrypick_pr generated vendored
@@ -2,6 +2,7 @@
 """Cherry pick and backport a PR"""
 
 import sys
+import os
 import argparse
 from os.path import expanduser
 import re
@@ -57,6 +58,8 @@ def main():
     parser.add_argument("--create_pr", action="store_true",
                         help="Create a PR using the Github API " +
                         "(requires token in ~/.elastic/github.token)")
+    parser.add_argument("--diff", action="store_true",
+                        help="Display the diff before pushing the PR")
     args = parser.parse_args()
 
     print(args)
@@ -97,6 +100,12 @@ def main():
         print("No commit to push")
         return 1
 
+    if args.diff:
+        call("git diff {}".format(args.to_branch), shell=True)
+        if raw_input("Continue? [y/n]: ") != "y":
+            print("Aborting cherry-pick.")
+            return 1
+
     print("Ready to push branch.")
     remote = raw_input("To which remote should I push? (your fork): ")
     call("git push {} :{} > /dev/null".format(remote, tmp_branch),
@@ -140,9 +149,23 @@ def main():
         # remove needs backport label from the original PR
         session.delete(base + "/issues/{}/labels/needs_backport".format(args.pr_number))
 
+        # get version and set a version label on the original PR
+        version = get_version(os.getcwd())
+        if version:
+            session.post(
+                base + "/issues/{}/labels".format(args.pr_number), json=["v" + version])
+
     print("\nDone. PR created: {}".format(new_pr["html_url"]))
     print("Please go and check it and add the review tags")
 
+
+def get_version(beats_dir):
+    pattern = re.compile(r'(const\s|)\w*(v|V)ersion\s=\s"(?P<version>.*)"')
+    with open(os.path.join(beats_dir, "libbeat/version/version.go"), "r") as f:
+        for line in f:
+            match = pattern.match(line)
+            if match:
+                return match.group('version')
+
+
 if __name__ == "__main__":
     sys.exit(main())
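The same `get_version` helper is added verbatim to open_pr further down. As a rough illustration of what its regex accepts, here is a minimal sketch; the `version.go` line below is a made-up example in the file's style, not taken from this commit:

    import re

    # Pattern copied from get_version: an optional `const` prefix, any
    # identifier ending in "version"/"Version", then the quoted value.
    pattern = re.compile(r'(const\s|)\w*(v|V)ersion\s=\s"(?P<version>.*)"')

    line = 'const defaultBeatVersion = "6.3.2"'  # illustrative version.go line
    match = pattern.match(line)
    if match:
        print(match.group('version'))  # prints: 6.3.2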
4 vendor/github.com/elastic/beats/dev-tools/deploy generated vendored
@@ -16,9 +16,9 @@ def main():
     check_call("make clean", shell=True)
     print("Done building Docker images.")
     if args.no_snapshot:
-        check_call("make SNAPSHOT=no package", shell=True)
+        check_call("make SNAPSHOT=no package-all", shell=True)
     else:
-        check_call("make SNAPSHOT=yes package", shell=True)
+        check_call("make SNAPSHOT=yes package-all", shell=True)
     print("All done")
 
 if __name__ == "__main__":
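The flag handling in deploy is untouched; only the make target changes. A minimal sketch of the resulting call, assuming `make` and the beats Makefile are available in the working directory:

    from subprocess import check_call

    def package(no_snapshot=False):
        # deploy now drives the aggregate `package-all` target instead of `package`.
        snapshot = "no" if no_snapshot else "yes"
        check_call("make SNAPSHOT={} package-all".format(snapshot), shell=True)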
142 vendor/github.com/elastic/beats/dev-tools/generate_notice.py generated vendored
@@ -44,33 +44,37 @@ def read_versions(vendor):
     return libs
 
 
-def gather_dependencies(vendor_dirs):
+def gather_dependencies(vendor_dirs, overrides=None):
     dependencies = {}  # lib_path -> [array of lib]
     for vendor in vendor_dirs:
         libs = read_versions(vendor)
 
         # walk looking for LICENSE files
         for root, dirs, filenames in os.walk(vendor):
-            for filename in sorted(filenames):
-                if filename.startswith("LICENSE"):
+            licenses = get_licenses(root)
+            for filename in licenses:
                 lib_path = get_library_path(root)
                 lib_search = [l for l in libs if l["path"].startswith(lib_path)]
                 if len(lib_search) == 0:
                     print("WARNING: No version information found for: {}".format(lib_path))
                     lib = {"path": lib_path}
                 else:
                     lib = lib_search[0]
                 lib["license_file"] = os.path.join(root, filename)
 
                 lib["license_contents"] = read_file(lib["license_file"])
                 lib["license_summary"] = detect_license_summary(lib["license_contents"])
                 if lib["license_summary"] == "UNKNOWN":
                     print("WARNING: Unknown license for: {}".format(lib_path))
 
+                revision = overrides.get(lib_path, {}).get("revision")
+                if revision:
+                    lib["revision"] = revision
+
                 if lib_path not in dependencies:
                     dependencies[lib_path] = [lib]
                 else:
                     dependencies[lib_path].append(lib)
 
             # don't walk down into another vendor dir
             if "vendor" in dirs:
@@ -78,6 +82,61 @@ def gather_dependencies(vendor_dirs):
     return dependencies
 
 
+def get_licenses(folder):
+    """
+    Get a list of license files from a given directory.
+    """
+    licenses = []
+    for filename in sorted(os.listdir(folder)):
+        if filename.startswith("LICENSE") and "docs" not in filename:
+            licenses.append(filename)
+        elif filename.startswith("APLv2"):  # gorhill/cronexpr
+            licenses.append(filename)
+    return licenses
+
+
+def has_license(folder):
+    """
+    Checks if a particular repo has a license files.
+
+    There are two cases accepted:
+        * The folder contains a LICENSE
+        * The folder only contains subdirectories AND all these
+          subdirectories contain a LICENSE
+    """
+    if len(get_licenses(folder)) > 0:
+        return True, ""
+
+    for subdir in os.listdir(folder):
+        if not os.path.isdir(os.path.join(folder, subdir)):
+            return False, folder
+        if len(get_licenses(os.path.join(folder, subdir))) == 0:
+            return False, os.path.join(folder, subdir)
+    return True, ""
+
+
+def check_all_have_license_files(vendor_dirs):
+    """
+    Checks that everything in the vendor folders has a license one way
+    or the other. This doesn't collect the licenses, because the code that
+    collects the licenses needs to walk the full tree. This one makes sure
+    that every folder in the `vendor` directories has at least one license.
+    """
+    issues = []
+    for vendor in vendor_dirs:
+        for root, dirs, filenames in os.walk(vendor):
+            if root.count(os.sep) - vendor.count(os.sep) == 2:  # two levels deep
+                # Two level deep means folders like `github.com/elastic`.
+                # look for the license in root but also one level up
+                ok, issue = has_license(root)
+                if not ok:
+                    print("No license in: {}".format(issue))
+                    issues.append(issue)
+    if len(issues) > 0:
+        raise Exception("I have found licensing issues in the following folders: {}"
+                        .format(issues))
+
+
 def write_notice_file(f, beat, copyright, dependencies):
 
     now = datetime.datetime.now()
@@ -137,20 +196,27 @@ def get_url(repo):
     return "https://github.com/{}/{}".format(words[1], words[2])
 
 
-def create_notice(filename, beat, copyright, vendor_dirs, csvfile):
-    dependencies = gather_dependencies(vendor_dirs)
+def create_notice(filename, beat, copyright, vendor_dirs, csvfile, overrides=None):
+    dependencies = gather_dependencies(vendor_dirs, overrides=overrides)
     if not csvfile:
         with open(filename, "w+") as f:
             write_notice_file(f, beat, copyright, dependencies)
+            print("Available at {}".format(filename))
     else:
         with open(csvfile, "wb") as f:
             csvwriter = csv.writer(f)
             write_csv_file(csvwriter, dependencies)
+            print("Available at {}".format(csvfile))
+    return dependencies
 
 
 APACHE2_LICENSE_TITLES = [
     "Apache License Version 2.0",
-    "Apache License, Version 2.0"
+    "Apache License, Version 2.0",
+    re.sub(r"\s+", " ", """Apache License
+            ==============
+
+            _Version 2.0, January 2004_"""),
 ]
@@ -166,7 +232,7 @@ copies or substantial portions of the Software.
 """),
     re.sub(r"\s+", " ", """Permission to use, copy, modify, and distribute this software for any
 purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.""")
+copyright notice and this permission notice appear in all copies."""),
 ]
 
 BSD_LICENSE_CONTENTS = [
@@ -209,6 +275,8 @@ MPL_LICENSE_TITLES = [
 def detect_license_summary(content):
     # replace all white spaces with a single space
     content = re.sub(r"\s+", ' ', content)
+    # replace smart quotes with less intelligent ones
+    content = content.replace(b'\xe2\x80\x9c', '"').replace(b'\xe2\x80\x9d', '"')
     if any(sentence in content[0:1000] for sentence in APACHE2_LICENSE_TITLES):
         return "Apache-2.0"
     if any(sentence in content[0:1000] for sentence in MIT_LICENSES):
@@ -230,6 +298,14 @@ def detect_license_summary(content):
     return "UNKNOWN"
 
 
+ACCEPTED_LICENSES = [
+    "Apache-2.0",
+    "MIT",
+    "BSD-4-Clause",
+    "BSD-3-Clause",
+    "BSD-2-Clause",
+    "MPL-2.0",
+]
 SKIP_NOTICE = []
 
 if __name__ == "__main__":
@@ -246,6 +322,9 @@ if __name__ == "__main__":
                         help="Output to a csv file")
     parser.add_argument("-e", "--excludes", default=["dev-tools", "build"],
                         help="List of top directories to exclude")
+    # no need to be generic for now, no other transitive dependency information available
+    parser.add_argument("--beats-origin", type=argparse.FileType('r'),
+                        help="path to beats vendor.json")
     parser.add_argument("-s", "--skip-notice", default=[],
                         help="List of NOTICE files to skip")
     args = parser.parse_args()
@@ -273,7 +352,18 @@ if __name__ == "__main__":
             if exclude in dirs:
                 dirs.remove(exclude)
 
-    print("Get the licenses available from {}".format(vendor_dirs))
-    create_notice(notice, args.beat, args.copyright, vendor_dirs, args.csvfile)
+    overrides = {}  # revision overrides only for now
+    if args.beats_origin:
+        govendor = json.load(args.beats_origin)
+        overrides = {package['path']: package for package in govendor["package"]}
 
-    print("Available at {}".format(notice))
+    print("Get the licenses available from {}".format(vendor_dirs))
+    check_all_have_license_files(vendor_dirs)
+    dependencies = create_notice(notice, args.beat, args.copyright, vendor_dirs, args.csvfile, overrides=overrides)
+
+    # check that all licenses are accepted
+    for _, deps in dependencies.items():
+        for dep in deps:
+            if dep["license_summary"] not in ACCEPTED_LICENSES:
+                raise Exception("Dependency {} has invalid license {}"
+                                .format(dep["path"], dep["license_summary"]))
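Two of the new pieces in generate_notice.py are easy to misread, so here is a small self-contained sketch of both: the whitespace normalization that lets a wrapped license text match the one-line MIT_LICENSES entries, and the overrides mapping built from a govendor vendor.json. The sample license text and package data below are invented for illustration:

    import re

    # detect_license_summary collapses all whitespace before comparing, so a
    # license wrapped across lines still matches the normalized known snippets.
    mit_snippet = re.sub(r"\s+", " ", """Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.""")

    license_text = """Permission to use, copy, modify,
    and distribute this software for any purpose with or without fee is hereby
    granted, provided that the above copyright notice and this permission notice
    appear in all copies."""
    content = re.sub(r"\s+", " ", license_text)
    print(mit_snippet in content[0:1000])  # True -> summarized as "MIT"

    # --beats-origin feeds a govendor vendor.json; the overrides map is keyed
    # by import path, and only the "revision" field is consumed for now.
    govendor = {"package": [{"path": "github.com/elastic/gosigar", "revision": "deadbeef"}]}
    overrides = {package['path']: package for package in govendor["package"]}
    print(overrides.get("github.com/elastic/gosigar", {}).get("revision"))  # deadbeef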
9 vendor/github.com/elastic/beats/dev-tools/jenkins_ci.ps1 generated vendored
@@ -36,13 +36,14 @@ exec { go get -u github.com/jstemmer/go-junit-report }
 echo "Building $env:beat"
 exec { go build } "Build FAILURE"
 
+# always build the libbeat fields
+cp ..\libbeat\_meta\fields.common.yml ..\libbeat\_meta\fields.generated.yml
+cat ..\libbeat\processors\*\_meta\fields.yml | Out-File -append -encoding UTF8 -filepath ..\libbeat\_meta\fields.generated.yml
+cp ..\libbeat\_meta\fields.generated.yml ..\libbeat\fields.yml
+
 if ($env:beat -eq "metricbeat") {
     cp .\_meta\fields.common.yml .\_meta\fields.generated.yml
     python .\scripts\fields_collector.py | out-file -append -encoding UTF8 -filepath .\_meta\fields.generated.yml
-} elseif ($env:beat -eq "libbeat") {
-    cp .\_meta\fields.common.yml .\_meta\fields.generated.yml
-    cat processors\*\_meta\fields.yml | Out-File -append -encoding UTF8 -filepath .\_meta\fields.generated.yml
-    cp .\_meta\fields.generated.yml .\fields.yml
 }
 
 echo "Unit testing $env:beat"
15 vendor/github.com/elastic/beats/dev-tools/open_pr generated vendored
@@ -2,6 +2,7 @@
 """Open a PR from the current branch"""
 
 import sys
+import os
 import argparse
 import requests
 import re
@@ -54,6 +55,11 @@ def main():
     if args.wip:
         lables += "in progress"
 
+    # get version and set a version label on the original PR
+    version = get_version(os.getcwd())
+    if version:
+        labels.append("v" + version)
+
     print("Branch: {}".format(args.branch))
     print("Remote: {}".format(args.remote))
     print("Local branch: {}".format(local_branch))
@@ -98,5 +104,14 @@ def main():
     print("Please go and review it for the message and labels.")
 
 
+def get_version(beats_dir):
+    pattern = re.compile(r'(const\s|)\w*(v|V)ersion\s=\s"(?P<version>.*)"')
+    with open(os.path.join(beats_dir, "libbeat/version/version.go"), "r") as f:
+        for line in f:
+            match = pattern.match(line)
+            if match:
+                return match.group('version')
+
+
 if __name__ == "__main__":
     sys.exit(main())
28 vendor/github.com/elastic/beats/dev-tools/packer/Makefile generated vendored
@@ -12,32 +12,32 @@ beat_abspath=${BEATS_GOPATH}/src/${BEAT_PATH}
 
 %/deb: ${BUILD_DIR}/god-linux-386 ${BUILD_DIR}/god-linux-amd64 fpm-image
 	echo Creating DEB packages for $(@D)
-	ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/debian/build.sh
-	ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/debian/build.sh
+	ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${UPLOAD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/debian/build.sh
+	ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${UPLOAD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/debian/build.sh
 
 %/rpm: ${BUILD_DIR}/god-linux-386 ${BUILD_DIR}/god-linux-amd64 fpm-image
 	echo Creating RPM packages for $(@D)
-	ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/centos/build.sh
-	ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/centos/build.sh
+	ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${UPLOAD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/centos/build.sh
+	ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${UPLOAD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/centos/build.sh
 
 %/darwin:
 	echo Creating Darwin packages for $(@D)
-	ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/darwin/build.sh
+	ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${UPLOAD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/darwin/build.sh
 
 %/win:
 	echo Creating Darwin packages for $(@D)
-	ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/windows/build.sh
-	ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/windows/build.sh
+	ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${UPLOAD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/windows/build.sh
+	ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${UPLOAD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/windows/build.sh
 
 %/bin:
 	echo Creating Linux packages for $(@D)
-	ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/binary/build.sh
-	ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/binary/build.sh
+	ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${UPLOAD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/binary/build.sh
+	ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${UPLOAD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/binary/build.sh
 
 .PHONY: package-dashboards
 package-dashboards:
 	echo Creating the Dashboards package
-	BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/dashboards/build.sh
+	UPLOAD_DIR=${UPLOAD_DIR} BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/dashboards/build.sh
 
 .PHONY: deps
 deps:
@@ -61,11 +61,11 @@ go-daemon-image:
 ${BUILD_DIR}/god-linux-386 ${BUILD_DIR}/god-linux-amd64:
 	docker run --rm -v ${BUILD_DIR}:/build tudorg/go-daemon
 
-${BUILD_DIR}/upload:
-	mkdir -p ${BUILD_DIR}/upload
+${UPLOAD_DIR}:
+	mkdir -p ${UPLOAD_DIR}
 
-${BUILD_DIR}/upload/build_id.txt:
-	echo $(BUILDID) > ${BUILD_DIR}/upload/build_id.txt
+${UPLOAD_DIR}/build_id.txt:
+	echo $(BUILDID) > ${UPLOAD_DIR}/build_id.txt
 
 # Build the image required for package-upload.
 .PHONY: deb-rpm-s3
@@ -1,4 +1,4 @@
-FROM tudorg/xgo-deb7-1.9.2
+FROM tudorg/xgo-deb7-1.9.4
 
 MAINTAINER Tudor Golubenco <tudor@elastic.co>
 
@@ -7,10 +7,10 @@ index f5612e6..0c77efa 100644
 
  /*
 -#cgo linux LDFLAGS: -lpcap
-+#cgo linux,386 CFLAGS: -I /libpcap/i386/usr/include/
-+#cgo linux,386 LDFLAGS: /libpcap/i386/usr/lib/libpcap.a
-+#cgo linux,amd64 CFLAGS: -I /libpcap/amd64/libpcap-1.8.1
-+#cgo linux,amd64 LDFLAGS: /libpcap/amd64/libpcap-1.8.1/libpcap.a
++#cgo linux,386 CFLAGS: -I/libpcap/i386/usr/include/
++#cgo linux,386 LDFLAGS: -L/libpcap/i386/usr/lib/ -lpcap
++#cgo linux,amd64 CFLAGS: -I/libpcap/amd64/libpcap-1.8.1
++#cgo linux,amd64 LDFLAGS: -L/libpcap/amd64/libpcap-1.8.1 -lpcap
  #cgo freebsd LDFLAGS: -lpcap
  #cgo openbsd LDFLAGS: -lpcap
  #cgo darwin LDFLAGS: -lpcap
@@ -18,9 +18,9 @@ index f5612e6..0c77efa 100644
 -#cgo windows CFLAGS: -I C:/WpdPack/Include
 -#cgo windows,386 LDFLAGS: -L C:/WpdPack/Lib -lwpcap
 -#cgo windows,amd64 LDFLAGS: -L C:/WpdPack/Lib/x64 -lwpcap
-+#cgo windows CFLAGS: -I /libpcap/win/WpdPack/Include
-+#cgo windows,386 LDFLAGS: -L /libpcap/win/WpdPack/Lib -lwpcap
-+#cgo windows,amd64 LDFLAGS: -L /libpcap/win/WpdPack/Lib/x64 -lwpcap
++#cgo windows CFLAGS: -I/libpcap/win/WpdPack/Include
++#cgo windows,386 LDFLAGS: -L/libpcap/win/WpdPack/Lib -lwpcap
++#cgo windows,amd64 LDFLAGS: -L/libpcap/win/WpdPack/Lib/x64 -lwpcap
  #include <stdlib.h>
  #include <pcap.h>
 
2 vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/build.sh generated vendored
@@ -2,5 +2,5 @@
 cp -r ../../../vendor/gopkg.in/yaml.v2 beats-builder/yaml.v2
 cp -r ../../../vendor/github.com/tsg/gotpl beats-builder/gotpl
 docker build --rm=true -t tudorg/xgo-deb7-base base/ && \
-docker build --rm=true -t tudorg/xgo-deb7-1.9.2 go-1.9.2/ &&
+docker build --rm=true -t tudorg/xgo-deb7-1.9.4 go-1.9.4/ &&
 docker build --rm=true -t tudorg/beats-builder-deb7 beats-builder
@@ -1,4 +1,4 @@
-# Go cross compiler (xgo): Go 1.9.2 layer
+# Go cross compiler (xgo): Go 1.9.4 layer
 # Copyright (c) 2014 Péter Szilágyi. All rights reserved.
 #
 # Released under the MIT license.
@@ -9,7 +9,7 @@ MAINTAINER Tudor Golubenco <tudor@elastic.co>
 
 # Configure the root Go distribution and bootstrap based on it
 RUN \
-  export ROOT_DIST="https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz" && \
-  export ROOT_DIST_SHA1="94c889e039e3d2e94ed95e8f8cb747c5bc1c2b58" && \
+  export ROOT_DIST="https://storage.googleapis.com/golang/go1.9.4.linux-amd64.tar.gz" && \
+  export ROOT_DIST_SHA1="ed1bd37c356338a5a04923c183931a96687f202e" && \
   \
   $BOOTSTRAP_PURE
3 vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/Dockerfile generated vendored
@@ -19,7 +19,10 @@ RUN chmod +x $FETCH
 
 
 # Make sure apt-get is up to date and dependent packages are installed
+# XXX: The first line is a workaround for the "Sum hash mismatch" error, from here:
+# https://askubuntu.com/questions/760574/sudo-apt-get-update-failes-due-to-hash-sum-mismatch
 RUN \
+  apt-get clean && \
   apt-get update && \
   apt-get install -y automake autogen build-essential ca-certificates \
     gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libc6-dev-armel-cross \
23 vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/build.sh generated vendored
@@ -105,7 +105,14 @@ fi
 
 if [ "$FLAG_V" == "true" ]; then V=-v; fi
 if [ "$FLAG_RACE" == "true" ]; then R=-race; fi
-if [ "$STATIC" == "true" ]; then LDARGS=--ldflags\ \'-extldflags\ \"-static\"\'; fi
+
+# exactly one -ldflags allowed
+LDFLAGS_STATIC=""
+if [ "$STATIC" == "true" ]; then LDFLAGS_STATIC='-extldflags "-static"'; fi
+NOW=$(date -u '+%Y-%m-%dT%H:%M:%SZ')
+LDFLAGS_VERSION="-X=github.com/elastic/beats/libbeat/version.buildTime=${NOW} -X=github.com/elastic/beats/libbeat/version.commit=${BUILDID}"
+LDFLAGS_VENDOR_VERSION="-X=${BEAT_PATH}/vendor/github.com/elastic/beats/libbeat/version.buildTime=${NOW} -X=${BEAT_PATH}/vendor/github.com/elastic/beats/libbeat/version.commit=${BUILDID}"
+LDFLAGS="${LDFLAGS_VERSION} ${LDFLAGS_VENDOR_VERSION} ${LDFLAGS_STATIC}"
 
 if [ -n $BEFORE_BUILD ]; then
     chmod +x /scripts/$BEFORE_BUILD
@@ -132,20 +139,20 @@ for TARGET in $TARGETS; do
       export PKG_CONFIG_PATH=/usr/aarch64-linux-gnu/lib/pkgconfig
 
       GOOS=linux GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go get -d ./$PACK
-      sh -c "GOOS=linux GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build $V $R $LDARGS -o /build/$NAME-linux-amd64$R ./$PACK"
+      sh -c "GOOS=linux GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build $V $R -ldflags=\"${LDFLAGS}\" -o /build/$NAME-linux-amd64$R ./$PACK"
     fi
     if ([ $XGOOS == "." ] || [ $XGOOS == "linux" ]) && ([ $XGOARCH == "." ] || [ $XGOARCH == "386" ]); then
       echo "Compiling $PACK for linux/386..."
       CFLAGS=-m32 CXXFLAGS=-m32 LDFLAGS=-m32 HOST=i686-linux PREFIX=/usr/local $BUILD_DEPS /deps $LIST_DEPS
       GOOS=linux GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go get -d ./$PACK
-      sh -c "GOOS=linux GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go build $V $R $LDARGS -o /build/$NAME-linux-386$R ./$PACK"
+      sh -c "GOOS=linux GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go build $V $R -ldflags=\"${LDFLAGS}\" -o /build/$NAME-linux-386$R ./$PACK"
     fi
     if ([ $XGOOS == "." ] || [ $XGOOS == "linux" ]) && ([ $XGOARCH == "." ] || [ $XGOARCH == "arm" ]); then
       echo "Compiling $PACK for linux/arm..."
       CC=arm-linux-gnueabi-gcc CXX=rm-linux-gnueabi-g++ HOST=arm-linux PREFIX=/usr/local/arm $BUILD_DEPS /deps $LIST_DEPS
 
       CC=arm-linux-gnueabi-gcc CXX=rm-linux-gnueabi-g++ GOOS=linux GOARCH=arm CGO_ENABLED=${CGO_ENABLED} GOARM=5 go get -d ./$PACK
-      CC=arm-linux-gnueabi-gcc CXX=rm-linux-gnueabi-g++ GOOS=linux GOARCH=arm CGO_ENABLED=${CGO_ENABLED} GOARM=5 go build $V -o /build/$NAME-linux-arm ./$PACK
+      CC=arm-linux-gnueabi-gcc CXX=rm-linux-gnueabi-g++ GOOS=linux GOARCH=arm CGO_ENABLED=${CGO_ENABLED} GOARM=5 go build $V -ldflags="${LDFLAGS}" -o /build/$NAME-linux-arm ./$PACK
     fi
 
     # Check and build for Windows targets
@@ -169,7 +176,7 @@ for TARGET in $TARGETS; do
       export PKG_CONFIG_PATH=/usr/x86_64-w64-mingw32/lib/pkgconfig
 
       CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ GOOS=windows GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go get -d ./$PACK
-      CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ GOOS=windows GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go build $V $R -o /build/$NAME-windows-amd64$R.exe ./$PACK
+      CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ GOOS=windows GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go build $V $R -ldflags="${LDFLAGS}" -o /build/$NAME-windows-amd64$R.exe ./$PACK
     fi
 
     if [ $XGOARCH == "." ] || [ $XGOARCH == "386" ]; then
@@ -178,7 +185,7 @@ for TARGET in $TARGETS; do
       export PKG_CONFIG_PATH=/usr/i686-w64-mingw32/lib/pkgconfig
 
      CC=i686-w64-mingw32-gcc CXX=i686-w64-mingw32-g++ GOOS=windows GOARCH=386 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go get -d ./$PACK
-      CC=i686-w64-mingw32-gcc CXX=i686-w64-mingw32-g++ GOOS=windows GOARCH=386 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go build $V -o /build/$NAME-windows-386.exe ./$PACK
+      CC=i686-w64-mingw32-gcc CXX=i686-w64-mingw32-g++ GOOS=windows GOARCH=386 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go build $V -ldflags="${LDFLAGS}" -o /build/$NAME-windows-386.exe ./$PACK
     fi
   fi
 
@@ -187,13 +194,13 @@ for TARGET in $TARGETS; do
       echo "Compiling $PACK for darwin/amd64..."
       CC=o64-clang CXX=o64-clang++ HOST=x86_64-apple-darwin10 PREFIX=/usr/local $BUILD_DEPS /deps $LIST_DEPS
       CC=o64-clang CXX=o64-clang++ GOOS=darwin GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go get -d ./$PACK
-      CC=o64-clang CXX=o64-clang++ GOOS=darwin GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build -ldflags=-s $V $R -o /build/$NAME-darwin-amd64$R ./$PACK
+      CC=o64-clang CXX=o64-clang++ GOOS=darwin GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build $V $R -ldflags="-s ${LDFLAGS}" -o /build/$NAME-darwin-amd64$R ./$PACK
     fi
     if ([ $XGOOS == "." ] || [ $XGOOS == "darwin" ]) && ([ $XGOARCH == "." ] || [ $XGOARCH == "386" ]); then
       echo "Compiling for darwin/386..."
       CC=o32-clang CXX=o32-clang++ HOST=i386-apple-darwin10 PREFIX=/usr/local $BUILD_DEPS /deps $LIST_DEPS
       CC=o32-clang CXX=o32-clang++ GOOS=darwin GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go get -d ./$PACK
-      CC=o32-clang CXX=o32-clang++ GOOS=darwin GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go build $V -o /build/$NAME-darwin-386 ./$PACK
+      CC=o32-clang CXX=o32-clang++ GOOS=darwin GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go build $V -ldflags="${LDFLAGS}" -o /build/$NAME-darwin-386 ./$PACK
     fi
 done
 
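The "exactly one -ldflags allowed" comment above is the crux of this build.sh change: go build honours only a single -ldflags argument, so the static-linking flag and the version-stamping -X flags have to be merged into one string before the call. A hedged Python sketch of the same composition, with invented values standing in for the shell variables:

    # Mirrors the shell logic: one combined LDFLAGS string, passed once.
    now = "2018-07-20T12:00:00Z"   # stand-in for `date -u '+%Y-%m-%dT%H:%M:%SZ'`
    buildid = "1269707447"          # stand-in for the build/commit id
    ldflags_version = ("-X=github.com/elastic/beats/libbeat/version.buildTime={0} "
                       "-X=github.com/elastic/beats/libbeat/version.commit={1}").format(now, buildid)
    ldflags_static = '-extldflags "-static"'  # only when STATIC=true
    ldflags = "{0} {1}".format(ldflags_version, ldflags_static)
    print('go build -ldflags="{0}" -o /build/beat ./pack'.format(ldflags))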
@@ -1,4 +1,4 @@
-FROM tudorg/xgo-1.9.2
+FROM tudorg/xgo-1.9.4
 
 MAINTAINER Tudor Golubenco <tudor@elastic.co>
 
2 vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/build.sh generated vendored
@@ -3,5 +3,5 @@
 cp -r ../../../vendor/gopkg.in/yaml.v2 beats-builder/yaml.v2
 cp -r ../../../vendor/github.com/tsg/gotpl beats-builder/gotpl
 docker pull tudorg/xgo-base:v20180222 && \
-docker build --rm=true -t tudorg/xgo-1.9.2 go-1.9.2/ &&
+docker build --rm=true -t tudorg/xgo-1.9.4 go-1.9.4/ &&
 docker build --rm=true -t tudorg/beats-builder beats-builder
@@ -1,4 +1,4 @@
-# Go cross compiler (xgo): Go 1.9.2 layer
+# Go cross compiler (xgo): Go 1.9.4 layer
 # Copyright (c) 2014 Péter Szilágyi. All rights reserved.
 #
 # Released under the MIT license.
@@ -9,7 +9,7 @@ MAINTAINER Tudor Golubenco <tudor@elastic.co>
 
 # Configure the root Go distribution and bootstrap based on it
 RUN \
-  export ROOT_DIST="https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz" && \
-  export ROOT_DIST_SHA1="94c889e039e3d2e94ed95e8f8cb747c5bc1c2b58" && \
+  export ROOT_DIST="https://storage.googleapis.com/golang/go1.9.4.linux-amd64.tar.gz" && \
+  export ROOT_DIST_SHA1="ed1bd37c356338a5a04923c183931a96687f202e" && \
   \
   $BOOTSTRAP_PURE
2 vendor/github.com/elastic/beats/dev-tools/packer/platforms/binary/build.sh generated vendored
@@ -12,7 +12,7 @@ cat ${BUILD_DIR}/package.yml ${ARCHDIR}/archs/$ARCH.yml > ${BUILD_DIR}/settings-
 gotpl ${BASEDIR}/run.sh.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/run-$runid.sh
 chmod +x ${BUILD_DIR}/run-$runid.sh
 
-docker run --rm -v ${BUILD_DIR}:/build \
+docker run --rm -v ${BUILD_DIR}:/build -v ${UPLOAD_DIR}:/upload \
   -e BUILDID=$BUILDID -e SNAPSHOT=$SNAPSHOT -e RUNID=$runid \
   tudorg/fpm /build/run-$runid.sh
 
18 vendor/github.com/elastic/beats/dev-tools/packer/platforms/binary/run.sh.j2 generated vendored
@@ -10,18 +10,20 @@ if [ "$SNAPSHOT" = "yes" ]; then
 VERSION="${VERSION}-SNAPSHOT"
 fi
 
+BEATS_YML_NAME="{{.beat_name}}-linux-{{.arch}}"
+[ -f "${BEATS_YML_NAME}.yml" ] || BEATS_YML_NAME="{{.beat_name}}-linux"
+
 mkdir /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}
 cp -a homedir/. /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/
 cp {{.beat_name}}-linux-{{.arch}} /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/{{.beat_name}}
-cp {{.beat_name}}-linux.yml /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/{{.beat_name}}.yml
-cp {{.beat_name}}-linux.reference.yml /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/{{.beat_name}}.reference.yml
+cp ${BEATS_YML_NAME}.yml /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/{{.beat_name}}.yml
+cp ${BEATS_YML_NAME}.reference.yml /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/{{.beat_name}}.reference.yml
 cp fields.yml /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/
 cp -a modules.d-linux/ /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/modules.d || true
 
-mkdir -p upload
-tar czvf upload/{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}.tar.gz /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}
-echo "Created upload/{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}.tar.gz"
+tar czvf /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-linux-{{.bin_arch}}.tar.gz /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}
+echo "Created /upload/{{.beat_name}}{{.beat_pkg_suffix}}--${VERSION}-linux-{{.bin_arch}}.tar.gz"
 
-cd upload
-sha512sum {{.beat_name}}-${VERSION}-linux-{{.bin_arch}}.tar.gz > {{.beat_name}}-${VERSION}-linux-{{.bin_arch}}.tar.gz.sha512
-echo "Created upload/{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}.tar.gz.sha512"
+cd /upload
+sha512sum {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-linux-{{.bin_arch}}.tar.gz > {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-linux-{{.bin_arch}}.tar.gz.sha512
+echo "Created /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-linux-{{.bin_arch}}.tar.gz.sha512"
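The BEATS_YML_NAME lines added to the run.sh.j2 templates are a two-step fallback: prefer an arch-specific config file if the build produced one, otherwise fall back to the generic -linux name. A sketch of the same logic in Python (function and argument names invented for illustration):

    import os

    def beats_yml_name(beat_name, arch):
        # Prefer "<beat>-linux-<arch>.yml" when it exists, else "<beat>-linux".
        candidate = "{0}-linux-{1}".format(beat_name, arch)
        if os.path.isfile(candidate + ".yml"):
            return candidate
        return "{0}-linux".format(beat_name)

    print(beats_yml_name("filebeat", "amd64"))  # "filebeat-linux" if no arch-specific file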
2 vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/build.sh generated vendored
@@ -16,7 +16,7 @@ gotpl ${BASEDIR}/systemd.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/$r
 gotpl ${BASEDIR}/beatname.sh.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/beatname-$runid.sh
 chmod +x ${BUILD_DIR}/beatname-$runid.sh
 
-docker run --rm -v ${BUILD_DIR}:/build \
+docker run --rm -v ${BUILD_DIR}:/build -v ${UPLOAD_DIR}:/upload \
   -e BUILDID=$BUILDID -e SNAPSHOT=$SNAPSHOT -e RUNID=$runid \
   tudorg/fpm /build/run-$runid.sh
 
20 vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/run.sh.j2 generated vendored
@@ -17,13 +17,16 @@ if [ "$SNAPSHOT" = "yes" ]; then
 VERSION="${VERSION}-SNAPSHOT"
 fi
 
+BEATS_YML_NAME="{{.beat_name}}-linux-{{.arch}}"
+[ -f "${BEATS_YML_NAME}.yml" ] || BEATS_YML_NAME="{{.beat_name}}-linux"
+
 # fpm replaces - with _ in the version
 RPM_VERSION=`echo ${VERSION} | sed 's/-/_/g'`
 
 # create rpm
 FPM_ARGS=(
   --force -s dir -t rpm
-  -n {{.beat_pkg_name}} -v ${RPM_VERSION}
+  -n {{.beat_pkg_name}}{{.beat_pkg_suffix}} -v ${RPM_VERSION}
   --architecture {{.rpm_arch}}
   --vendor "{{.beat_vendor}}"
   --license "{{.beat_license}}"
@@ -35,8 +38,8 @@ FPM_ARGS=(
   homedir/=/usr/share/{{.beat_name}}
   beatname-${RUNID}.sh=/usr/bin/{{.beat_name}}
   {{.beat_name}}-linux-{{.arch}}=/usr/share/{{.beat_name}}/bin/{{.beat_name}}
-  {{.beat_name}}-linux.yml=/etc/{{.beat_name}}/{{.beat_name}}.yml
-  {{.beat_name}}-linux.reference.yml=/etc/{{.beat_name}}/{{.beat_name}}.reference.yml
+  ${BEATS_YML_NAME}.yml=/etc/{{.beat_name}}/{{.beat_name}}.yml
+  ${BEATS_YML_NAME}.reference.yml=/etc/{{.beat_name}}/{{.beat_name}}.reference.yml
   fields.yml=/etc/{{.beat_name}}/fields.yml
   ${RUNID}.service=/lib/systemd/system/{{.beat_pkg_name}}.service
   god-linux-{{.arch}}=/usr/share/{{.beat_name}}/bin/{{.beat_name}}-god
@@ -49,11 +52,10 @@ fi
 fpm "${FPM_ARGS[@]}"
 
 # rename so that the filename respects semver rules
-mkdir -p upload
-mv {{.beat_pkg_name}}-${RPM_VERSION}-1.{{.rpm_arch}}.rpm upload/{{.beat_name}}-${VERSION}-{{.rpm_arch}}.rpm
-echo "Created upload/{{.beat_name}}-${VERSION}-{{.rpm_arch}}.rpm"
+mv {{.beat_pkg_name}}{{.beat_pkg_suffix}}-${RPM_VERSION}-1.{{.rpm_arch}}.rpm /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.rpm_arch}}.rpm
+echo "Created /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.rpm_arch}}.rpm"
 
 # create sha512 file
-cd upload
-sha512sum {{.beat_name}}-${VERSION}-{{.rpm_arch}}.rpm > {{.beat_name}}-${VERSION}-{{.rpm_arch}}.rpm.sha512
-echo "Created upload/{{.beat_name}}-${VERSION}-{{.rpm_arch}}.rpm.sha512"
+cd /upload
+sha512sum {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.rpm_arch}}.rpm > {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.rpm_arch}}.rpm.sha512
+echo "Created /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.rpm_arch}}.rpm.sha512"
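A note on the RPM_VERSION line kept in context above: fpm rewrites dashes to underscores in versions, so the script computes the RPM-side version up front and then renames the artifact back to a semver-style filename. The transformation is just:

    def rpm_version(version):
        # Equivalent of: echo ${VERSION} | sed 's/-/_/g'
        return version.replace("-", "_")

    print(rpm_version("6.3.2-SNAPSHOT"))  # 6.3.2_SNAPSHOT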
2 vendor/github.com/elastic/beats/dev-tools/packer/platforms/darwin/build.sh generated vendored
@@ -12,7 +12,7 @@ cat ${BUILD_DIR}/package.yml ${ARCHDIR}/archs/$ARCH.yml > ${BUILD_DIR}/settings-
 gotpl ${BASEDIR}/run.sh.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/run-$runid.sh
 chmod +x ${BUILD_DIR}/run-$runid.sh
 
-docker run --rm -v ${BUILD_DIR}:/build \
+docker run --rm -v ${BUILD_DIR}:/build -v ${UPLOAD_DIR}:/upload \
   -e BUILDID=$BUILDID -e SNAPSHOT=$SNAPSHOT -e RUNID=$runid \
   tudorg/fpm /build/run-$runid.sh
 
11
vendor/github.com/elastic/beats/dev-tools/packer/platforms/darwin/run.sh.j2
generated
vendored
@@ -18,10 +18,9 @@ cp {{.beat_name}}-darwin.reference.yml /{{.beat_name}}-${VERSION}-darwin-x86_64/
cp fields.yml /{{.beat_name}}-${VERSION}-darwin-x86_64/
cp -a modules.d-darwin/ /{{.beat_name}}-${VERSION}-darwin-x86_64/modules.d || true

-mkdir -p upload
-tar czvf upload/{{.beat_name}}-${VERSION}-darwin-x86_64.tar.gz /{{.beat_name}}-${VERSION}-darwin-x86_64
-echo "Created upload/{{.beat_name}}-${VERSION}-darwin-x86_64.tar.gz"
+tar czvf /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-darwin-x86_64.tar.gz /{{.beat_name}}-${VERSION}-darwin-x86_64
+echo "Created /upload/{{.beat_name}}-${VERSION}-darwin-x86_64.tar.gz"

-cd upload
+cd /upload
-sha512sum {{.beat_name}}-${VERSION}-darwin-x86_64.tar.gz > {{.beat_name}}-${VERSION}-darwin-x86_64.tar.gz.sha512
+sha512sum {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-darwin-x86_64.tar.gz > {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-darwin-x86_64.tar.gz.sha512
-echo "Created upload/{{.beat_name}}-${VERSION}-darwin-x86_64.tar.gz.sha512"
+echo "Created /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-darwin-x86_64.tar.gz.sha512"
2
vendor/github.com/elastic/beats/dev-tools/packer/platforms/dashboards/build.sh
generated
vendored
@@ -11,7 +11,7 @@ cat ${ARCHDIR}/version.yml > ${BUILD_DIR}/settings-$runid.yml
gotpl ${BASEDIR}/run.sh.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/run-$runid.sh
chmod +x ${BUILD_DIR}/run-$runid.sh

-docker run --rm -v ${BUILD_DIR}:/build \
+docker run --rm -v ${BUILD_DIR}:/build -v ${UPLOAD_DIR}:/upload \
  -e BUILDID=$BUILDID -e SNAPSHOT=$SNAPSHOT -e RUNID=$runid -e BEAT_NAME=$BEAT_NAME \
  tudorg/fpm /build/run-$runid.sh
9
vendor/github.com/elastic/beats/dev-tools/packer/platforms/dashboards/run.sh.j2
generated
vendored
@@ -14,10 +14,9 @@ mkdir /${BEAT_NAME:-beats}-dashboards-${VERSION}
cp -a dashboards/. /${BEAT_NAME:-beats}-dashboards-${VERSION}/
echo "$BUILDID" > /${BEAT_NAME:-beats}-dashboards-${VERSION}/.build_hash.txt

-mkdir -p upload
-zip -r upload/${BEAT_NAME:-beats}-dashboards-${VERSION}.zip /${BEAT_NAME:-beats}-dashboards-${VERSION}
-echo "Created upload/${BEAT_NAME:-beats}-dashboards-${VERSION}.zip"
+zip -r /upload/${BEAT_NAME:-beats}-dashboards-${VERSION}.zip /${BEAT_NAME:-beats}-dashboards-${VERSION}
+echo "Created /upload/${BEAT_NAME:-beats}-dashboards-${VERSION}.zip"

-cd upload
+cd /upload
sha512sum ${BEAT_NAME:-beats}-dashboards-${VERSION}.zip > ${BEAT_NAME:-beats}-dashboards-${VERSION}.zip.sha512
-echo "Created upload/${BEAT_NAME:-beats}-dashboards-${VERSION}.zip.sha512"
+echo "Created /upload/${BEAT_NAME:-beats}-dashboards-${VERSION}.zip.sha512"
2
vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/build.sh
generated
vendored
@@ -16,7 +16,7 @@ gotpl ${BASEDIR}/systemd.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/$r
gotpl ${BASEDIR}/beatname.sh.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/beatname-$runid.sh
chmod +x ${BUILD_DIR}/beatname-$runid.sh

-docker run --rm -v ${BUILD_DIR}:/build \
+docker run --rm -v ${BUILD_DIR}:/build -v ${UPLOAD_DIR}:/upload \
  -e BUILDID=$BUILDID -e SNAPSHOT=$SNAPSHOT -e RUNID=$runid \
  tudorg/fpm /build/run-$runid.sh
22
vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/run.sh.j2
generated
vendored
@@ -17,12 +17,15 @@ if [ "$SNAPSHOT" = "yes" ]; then
  VERSION="${VERSION}-SNAPSHOT"
fi

+BEATS_YML_NAME="{{.beat_name}}-linux-{{.arch}}"
+[ -f "${BEATS_YML_NAME}.yml" ] || BEATS_YML_NAME="{{.beat_name}}-linux"
+
# create deb
FPM_ARGS=(
  --force -s dir -t deb
-  -n {{.beat_pkg_name}} -v ${VERSION}
+  -n {{.beat_pkg_name}}{{.beat_pkg_suffix}} -v ${VERSION}
  --vendor "{{.beat_vendor}}"
-  --license "{{.beat_license}}"
+  --license $(echo {{.beat_license}} | tr " " "-")
  --architecture {{.deb_arch}}
  --description "{{.beat_description}}"
  --url {{.beat_url}}
@@ -32,8 +35,8 @@ FPM_ARGS=(
  homedir/=/usr/share/{{.beat_name}}
  beatname-${RUNID}.sh=/usr/bin/{{.beat_name}}
  {{.beat_name}}-linux-{{.arch}}=/usr/share/{{.beat_name}}/bin/{{.beat_name}}
-  {{.beat_name}}-linux.yml=/etc/{{.beat_name}}/{{.beat_name}}.yml
-  {{.beat_name}}-linux.reference.yml=/etc/{{.beat_name}}/{{.beat_name}}.reference.yml
+  ${BEATS_YML_NAME}.yml=/etc/{{.beat_name}}/{{.beat_name}}.yml
+  ${BEATS_YML_NAME}.reference.yml=/etc/{{.beat_name}}/{{.beat_name}}.reference.yml
  fields.yml=/etc/{{.beat_name}}/fields.yml
  ${RUNID}.service=/lib/systemd/system/{{.beat_pkg_name}}.service
  god-linux-{{.arch}}=/usr/share/{{.beat_name}}/bin/{{.beat_name}}-god
@@ -46,11 +49,10 @@ fi
fpm "${FPM_ARGS[@]}"

# move and rename to use the elastic conventions
-mkdir -p upload
-mv {{.beat_pkg_name}}_${VERSION}_{{.deb_arch}}.deb upload/{{.beat_name}}-${VERSION}-{{.deb_arch}}.deb
-echo "Created upload/{{.beat_name}}-${VERSION}-{{.deb_arch}}.deb"
+mv {{.beat_pkg_name}}{{.beat_pkg_suffix}}_${VERSION}_{{.deb_arch}}.deb /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.deb_arch}}.deb
+echo "Created /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.deb_arch}}.deb"

# create sha512 file
-cd upload
+cd /upload
-sha512sum {{.beat_name}}-${VERSION}-{{.deb_arch}}.deb > {{.beat_name}}-${VERSION}-{{.deb_arch}}.deb.sha512
+sha512sum {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.deb_arch}}.deb > {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.deb_arch}}.deb.sha512
-echo "Created upload/{{.beat_name}}-${VERSION}-{{.deb_arch}}.deb.sha512"
+echo "Created /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.deb_arch}}.deb.sha512"
2
vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/build.sh
generated
vendored
@@ -14,7 +14,7 @@ gotpl ${BASEDIR}/install-service.ps1.j2 < ${BUILD_DIR}/settings-$runid.yml > ${B
gotpl ${BASEDIR}/uninstall-service.ps1.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/uninstall-service-$BEAT.ps1
chmod +x ${BUILD_DIR}/run-$runid.sh

-docker run --rm -v ${BUILD_DIR}:/build \
+docker run --rm -v ${BUILD_DIR}:/build -v ${UPLOAD_DIR}:/upload \
  -e BUILDID=$BUILDID -e SNAPSHOT=$SNAPSHOT -e RUNID=$runid \
  tudorg/fpm /build/run-$runid.sh
vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/install-service.ps1.j2
generated
vendored
@@ -11,4 +11,4 @@ $workdir = Split-Path $MyInvocation.MyCommand.Path

# create new service
New-Service -name {{.beat_name}} `
  -displayName {{.beat_name}} `
-  -binaryPathName "`"$workdir\\{{.beat_name}}.exe`" -c `"$workdir\\{{.beat_name}}.yml`" -path.home `"$workdir`" -path.data `"C:\\ProgramData\\{{.beat_name}}`" -path.logs `"C:\\ProgramData\\{{.beat_name}}\logs`""
+  -binaryPathName "`"$workdir\{{.beat_name}}.exe`" -c `"$workdir\{{.beat_name}}.yml`" -path.home `"$workdir`" -path.data `"C:\ProgramData\{{.beat_name}}`" -path.logs `"C:\ProgramData\{{.beat_name}}\logs`""
11
vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/run.sh.j2
generated
vendored
@@ -21,10 +21,9 @@ cp -a modules.d-win/ /{{.beat_name}}-${VERSION}-windows-{{.win_arch}}/modules.d
cp install-service-{{.beat_name}}.ps1 /{{.beat_name}}-${VERSION}-windows-{{.win_arch}}/
cp uninstall-service-{{.beat_name}}.ps1 /{{.beat_name}}-${VERSION}-windows-{{.win_arch}}/

-mkdir -p upload
-zip -r upload/{{.beat_name}}-${VERSION}-windows-{{.win_arch}}.zip /{{.beat_name}}-${VERSION}-windows-{{.win_arch}}
-echo "Created upload/{{.beat_name}}-${VERSION}-windows-{{.win_arch}}.zip"
+zip -r /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-windows-{{.win_arch}}.zip /{{.beat_name}}-${VERSION}-windows-{{.win_arch}}
+echo "Created /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-windows-{{.win_arch}}.zip"

-cd upload
+cd /upload
-sha512sum {{.beat_name}}-${VERSION}-windows-{{.win_arch}}.zip > {{.beat_name}}-${VERSION}-windows-{{.win_arch}}.zip.sha512
+sha512sum {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-windows-{{.win_arch}}.zip > {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-windows-{{.win_arch}}.zip.sha512
-echo "Created upload/{{.beat_name}}-${VERSION}-windows-{{.win_arch}}.zip.sha512"
+echo "Created /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-windows-{{.win_arch}}.zip.sha512"
2
vendor/github.com/elastic/beats/dev-tools/packer/version.yml
generated
vendored
@@ -1 +1 @@
-version: "6.2.4"
+version: "6.3.3"
3
vendor/github.com/elastic/beats/dev-tools/packer/xgo-scripts/before_build.sh
generated
vendored
@@ -18,6 +18,7 @@ cp fields.yml $PREFIX/fields.yml
# linux
cp $BEAT_NAME.yml $PREFIX/$BEAT_NAME-linux.yml
chmod 0600 $PREFIX/$BEAT_NAME-linux.yml
+chmod 0600 $PREFIX/$BEAT_NAME-linux-386.yml || true
cp $BEAT_NAME.reference.yml $PREFIX/$BEAT_NAME-linux.reference.yml
rm -rf $PREFIX/modules.d-linux
cp -r modules.d/ $PREFIX/modules.d-linux || true
@@ -44,7 +45,7 @@ PREFIX=$PREFIX make before-build

# Add data to the home directory
mkdir -p $PREFIX/homedir
-make install-home HOME_PREFIX=$PREFIX/homedir
+make install-home HOME_PREFIX=$PREFIX/homedir LICENSE_FILE=${LICENSE_FILE}

if [ -n "BUILDID" ]; then
	echo "$BUILDID" > $PREFIX/homedir/.build_hash.txt
35
vendor/github.com/elastic/beats/dev-tools/promote_docs
generated
vendored
Executable file
@@ -0,0 +1,35 @@
#!/usr/bin/env python
import argparse
from subprocess import check_call


def main():
    parser = argparse.ArgumentParser(
        description="Used to promote doc version and branch. Doesn't commit changes.")
    parser.add_argument("version",
                        help="The new docs version")
    parser.add_argument("branch",
                        help="The new docs branch")
    args = parser.parse_args()
    version = args.version
    branch = args.branch

    # make sure we have no dirty files in this branch (might throw off `make update`)
    check_call("git clean -dfx", shell=True)

    # edit the file
    with open("libbeat/docs/version.asciidoc", "r") as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        if line.startswith(":stack-version:"):
            lines[i] = ":stack-version: {}\n".format(version)
        if line.startswith(":branch:"):
            lines[i] = ":branch: {}\n".format(branch)
        if line.startswith(":doc-branch:"):
            lines[i] = ":doc-branch: {}\n".format(branch)
    with open("libbeat/docs/version.asciidoc", "w") as f:
        f.writelines(lines)

    check_call("make update", shell=True)

if __name__ == "__main__":
    main()
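Judging from the argparse definition above, the script takes positional version and branch arguments and is run from the repository root, e.g. `./dev-tools/promote_docs 6.3.3 6.3` (values hypothetical); as the description notes, the rewritten `libbeat/docs/version.asciidoc` and the `make update` output still have to be committed by hand.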
3
vendor/github.com/elastic/beats/dev-tools/set_version
generated
vendored
@@ -13,7 +13,8 @@ goversion_template = '''package main
const appVersion = "{version}"
'''

-goversion_template_libbeat = '''package version
+goversion_template_libbeat = '''// Code generated by dev-tools/set_version
+package version

const defaultBeatVersion = "{version}"
'''
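With the template change above and the packer version bump earlier in this commit, the rendered libbeat version file would look roughly like this (a sketch of the template output for version 6.3.3; the target file path is not shown in this diff):

[source,go]
----
// Code generated by dev-tools/set_version
package version

const defaultBeatVersion = "6.3.3"
----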
28
vendor/github.com/elastic/beats/docs/devguide/contributing.asciidoc
generated
vendored
@@ -16,14 +16,18 @@ The process for contributing to any of the Elastic repositories is similar.
[[contribution-steps]]
=== Contribution Steps

-. Please make sure you have signed our https://www.elastic.co/contributor-agreement/[Contributor License Agreement]. We are not asking you to assign
-copyright to us, but to give us the right to distribute your code without
-restriction. We ask this of all contributors in order to assure our users of the
-origin and continuing existence of the code. You only need to sign the CLA once.
+. Please make sure you have signed our
+https://www.elastic.co/contributor-agreement/[Contributor License Agreement]. We
+are not asking you to assign copyright to us, but to give us the right to
+distribute your code without restriction. We ask this of all contributors in
+order to assure our users of the origin and continuing existence of the code.
+You only need to sign the CLA once.

-. Send a pull request! Push your changes to your fork of the repository and https://help.github.com/articles/using-pull-requests[submit a pull request]. In
+. Send a pull request! Push your changes to your fork of the repository and
+https://help.github.com/articles/using-pull-requests[submit a pull request]. In
the pull request, describe what your changes do and mention any bugs/issues
-related to the pull request. Please also add a changelog entry to https://github.com/elastic/beats/blob/master/CHANGELOG.asciidoc[CHANGELOG.asciidoc].
+related to the pull request. Please also add a changelog entry to
+https://github.com/elastic/beats/blob/master/CHANGELOG.asciidoc[CHANGELOG.asciidoc].

[float]
[[adding-new-beat]]
@@ -92,8 +96,9 @@ This command has the following dependencies:
* Python >= {python}
* https://virtualenv.pypa.io/en/latest/[virtualenv] for Python

-Virtualenv can be installed with the command `easy_install virtualenv` or `pip install virtualenv`.
-More details can be found https://virtualenv.pypa.io/en/latest/installation.html[here].
+Virtualenv can be installed with the command `easy_install virtualenv` or `pip
+install virtualenv`. More details can be found
+https://virtualenv.pypa.io/en/latest/installation.html[here].

[float]
[[running-testsuite]]
@@ -136,3 +141,10 @@ the govendor documentation on how to add or update vendored dependencies.

In most cases `govendor fetch your/dependency@version +out` will get the job done.
+
+[float]
+[[changelog]]
+=== Changelog
+
+To keep up to date with changes to the official Beats for community developers,
+follow the developer changelog
+https://github.com/elastic/beats/blob/master/CHANGELOG-developer.md[here].
3
vendor/github.com/elastic/beats/docs/devguide/index.asciidoc
generated
vendored
@@ -3,9 +3,12 @@

include::../../libbeat/docs/version.asciidoc[]

+:dev-guide: true
:beatname_lc: beatname
:beatname_uc: a Beat

+include::{asciidoc-dir}/../../shared/attributes.asciidoc[]
+
include::../../libbeat/docs/shared-beats-attributes.asciidoc[]

include::./contributing.asciidoc[]
166
vendor/github.com/elastic/beats/docs/devguide/modules-dev-guide.asciidoc
generated
vendored
@@ -18,26 +18,98 @@ example, the Nginx module has `access` and `error` filesets. You can contribute
a new module (with at least one fileset), or a new fileset for an existing
module.

+NOTE: In this guide we use `{module}` and `{fileset}` as placeholders for the
+module and fileset names. You need to replace these with the actual names you
+entered when you created the module and fileset. Only use characters `[a-z]` and, if required, underscores (`_`). No other characters are allowed.
+
+[float]
+=== Creating a new module
+
+Run the following command in the `filebeat` folder:
+
+[source,bash]
+----
+make create-module MODULE={module}
+----
+
+After running the `make create-module` command, you'll find the module,
+along with its generated files, under `module/{module}`. This
+directory contains the following files:
+
+[source,bash]
+----
+module/{module}
+├── module.yml
+└── _meta
+    └── docs.asciidoc
+    └── fields.yml
+    └── kibana
+----
+
+Let's look at these files one by one.
+
+[float]
+==== module.yml
+
+This file contains the list of all the dashboards available for the module and is used by the `export_dashboards.go` script for exporting dashboards.
+Each dashboard is defined by an id and the name of the JSON file where the dashboard is saved locally.
+When a new fileset is generated, this file is automatically updated with "default" dashboard settings for the new fileset.
+Please ensure that these settings are correct.
+
+[float]
+==== _meta/docs.asciidoc
+
+This file contains module-specific documentation. You should include information
+about which versions of the service were tested and the variables that are
+defined in each fileset.
+
+[float]
+==== _meta/fields.yml
+
+The module level `fields.yml` contains descriptions for the module-level fields.
+Please review and update the title and the descriptions in this file. The title
+is used as a title in the docs, so it's best to capitalize it.
+
+[float]
+==== _meta/kibana
+
+This folder contains the sample Kibana dashboards for this module. To create
+them, you can build them visually in Kibana and then run the following command:
+
+[source,shell]
+----
+$ cd dev-tools/cmd/dashboards
+$ make # if export_dashboard is not built
+$ ./export_dashboards -dashboard '{dashboard-id}' -output '../../../filebeat/module/{module}/_meta/kibana/default/dashboard'
+----
+
+New Filebeat modules might not be compatible with Kibana 5.x. To export dashboards that are compatible with 5.x, run the following command inside the developer virtualenv:
+
+[source,shell]
+----
+$ cd filebeat
+$ make python-env
+$ cd module/{module}/
+$ python ../../../dev-tools/export_5x_dashboards.py --regex {module} --dir _meta/kibana/5.x
+----
+
+Where the `--regex` parameter should match the dashboard you want to export.
+
+Please note that dashboards exported from Kibana 5.x are not compatible with Kibana 6.x.
+
+You can find more details about the process of creating and exporting the Kibana
+dashboards by reading {beatsdevguide}/new-dashboards.html[this guide].
+
[float]
=== Creating a new fileset

-Regardless of whether you are creating a fileset in a new or existing module,
-the procedure is similar. Run the following command in the `filebeat` folder:
+Run the following command in the `filebeat` folder:

[source,bash]
----
make create-fileset MODULE={module} FILESET={fileset}
----

+Only use characters `[a-z]` and, if required, underscores (`_`).
+No other characters are allowed.
+For the module name, you can use either a new module name or an existing module
+name. If the module doesn't exist, it will be created.
+
-NOTE: In this guide we use `{fileset}` and `{module}` as placeholders for the
-fileset and module names. You need to replace these with the actual names you
-entered when your created the module and fileset.
-
After running the `make create-fileset` command, you'll find the fileset,
along with its generated files, under `module/{module}/{fileset}`. This
directory contains the following files:
@@ -104,8 +176,7 @@ There's quite a lot going on in this file, so let's break it down:
  element: `"/example/test.log*"`.
* Note that variable values don't have to be strings.
  They can be also numbers, objects, or as shown in this example, arrays.
-* We will use the `paths` variable to set the prospector
-  {filebeat}/configuration-filebeat-options.html#prospector-paths[paths]
+* We will use the `paths` variable to set the input `paths`
  setting, so "glob" values can be used here.
* Besides the `default` value, the file defines values for particular
  operating systems: a default for darwin/OS X/macOS systems and a default for
@@ -114,13 +185,13 @@ There's quite a lot going on in this file, so let's break it down:
  Filebeat is executed on the respective OS.

Besides the variable definition, the `manifest.yml` file also contains
-references to the ingest pipeline and prospector configuration to use (see next
+references to the ingest pipeline and input configuration to use (see next
sections):

[source,yaml]
----
ingest_pipeline: ingest/pipeline.json
-prospector: config/testfileset.yml
+input: config/testfileset.yml
----

These should point to the respective files from the fileset.
@@ -142,8 +213,8 @@ overridden at runtime.)
[float]
==== config/*.yml

-The `config/` folder contains template files that generate Filebeat prospector
-configurations. The Filebeat prospectors are primarily responsible for tailing
+The `config/` folder contains template files that generate Filebeat input
+configurations. The Filebeat inputs are primarily responsible for tailing
files, filtering, and multi-line stitching, so that's what you configure in the
template files.
@@ -161,12 +232,12 @@ exclude_files: [".gz$"]

You'll find this example in the template file that gets generated automatically
when you run `make create-fileset`. In this example, the `paths` variable is
-used to construct the `paths` list for the {filebeat}/configuration-filebeat-options.html#prospector-paths[paths] option.
+used to construct the `paths` list for the input `paths` option.

Any template files that you add to the `config/` folder need to generate a valid
-Filebeat prospector configuration in YAML format. The options accepted by the
-prospector configuration are documented in the
-{filebeat}/configuration-filebeat-options.html[Filebeat Prospectors] section of
+Filebeat input configuration in YAML format. The options accepted by the
+input configuration are documented in the
+{filebeat}/configuration-filebeat-options.html[Filebeat Inputs] section of
the Filebeat documentation.

The template files use the templating language defined by the
@@ -250,6 +321,9 @@ While developing the pipeline definition, we recommend making use of the
{elasticsearch}/simulate-pipeline-api.html[Simulate Pipeline API] for testing
and quick iteration.

+By default Filebeat does not update Ingest pipelines if already loaded. If you want to force updating your pipeline
+during development, use the `--update-pipelines` flag. This uploads pipelines even if they are already available on the node.
+
[float]
==== _meta/fields.yml

@@ -289,53 +363,3 @@ In addition, assuming you have a `test.log` file, you can add a
documents as they are found via an Elasticsearch search. In this case, the
integration tests will automatically check that the result is the same on each
run.
-
-[float]
-=== Module-level files
-
-Besides the files in the fileset folder, there is also data that needs to be
-filled at the module level.
-
-[float]
-==== _meta/docs.asciidoc
-
-This file contains module-specific documentation. You should include information
-about which versions of the service were tested and the variables that are
-defined in each fileset.
-
-[float]
-==== _meta/fields.yml
-
-The module level `fields.yml` contains descriptions for the module-level fields.
-Please review and update the title and the descriptions in this file. The title
-is used as a title in the docs, so it's best to capitalize it.
-
-[float]
-==== _meta/kibana
-
-This folder contains the sample Kibana dashboards for this module. To create
-them, you can build them visually in Kibana and then run the following command:
-
-[source,shell]
-----
-$ cd dev-tools/cmd/dashboards
-$ make # if export_dashboard is not built
-$ ./export_dashboards -dashboard '{dashboard-id}' -output '../../../filebeat/module/{module}/_meta/kibana/default/dashboard'
-----
-
-New Filebeat modules might not be compatible with Kibana 5.x. To export dashboards that are compatible with 5.x, run the following command inside the developer virtualenv:
-
-[source,shell]
-----
-$ cd filebeat
-$ make python-env
-$ cd module/{module}/
-$ python ../../../dev-tools/export_5x_dashboards.py --regex {module} --dir _meta/kibana/5.x
-----
-
-Where the `--regex` parameter should match the dashboard you want to export.
-
-Please note that dashboards exported from Kibana 5.x are not compatible with Kibana 6.x.
-
-You can find more details about the process of creating and exporting the Kibana
-dashboards by reading {beatsdevguide}/new-dashboards.html[this guide].
13
vendor/github.com/elastic/beats/docs/devguide/newbeat.asciidoc
generated
vendored
@@ -41,17 +41,16 @@ For general information about contributing to Beats, see <<beats-contributing>>.

After you have https://golang.org/doc/install[installed Go] and set up the
https://golang.org/doc/code.html#GOPATH[GOPATH] environment variable to point to
-your preferred workspace location, a simple way of getting the source code for
-Beats, including libbeat and the Beat generator, is to do:
+your preferred workspace location, clone the Beats repository in the correct location
+under `GOPATH`:

[source,shell]
----------------------------------------------------------------------
-go get github.com/elastic/beats
+mkdir -p ${GOPATH}/src/github.com/elastic
+git clone https://github.com/elastic/beats ${GOPATH}/src/github.com/elastic/beats
----------------------------------------------------------------------

-When you run the command, all source files are downloaded to the
-`$GOPATH/src/github.com/elastic/beats` path. You can ignore the "no buildable Go source files" message because
-you will build the source later. By default `go get` fetches the master branch. To build your beat
+To build your beat
on a specific version of libbeat, check out the specific branch ({doc-branch} in the example below):

["source","sh",subs="attributes"]
@@ -474,9 +473,9 @@ package main

import (
	"os"
-	"github.com/spf13/cobra"

	"github.com/elastic/beats/libbeat/beat"
+	"github.com/elastic/beats/libbeat/cmd"

	"github.com/kimjmin/countbeat/beater"
)
2
vendor/github.com/elastic/beats/docs/devguide/newdashboards.asciidoc
generated
vendored
@@ -85,7 +85,7 @@ import only the dashboards, use the `--dashboards` flag:

Starting with Beats 6.0.0, the dashboards are no longer loaded directly into Elasticsearch. Instead, they are imported directly into Kibana.
Thus, if your Kibana instance is not listening on localhost, or you enabled
-X-Pack for Kibana, you need to either configure the Kibana endpoint in
+{xpack} for Kibana, you need to either configure the Kibana endpoint in
the config for the Beat, or pass the Kibana host and credentials as
arguments to the `setup` command. For example:
2
vendor/github.com/elastic/beats/filebeat/Dockerfile
generated
vendored
@@ -1,4 +1,4 @@
-FROM golang:1.9.2
+FROM golang:1.9.4
MAINTAINER Nicolas Ruflin <ruflin@elastic.co>

RUN set -x && \
7
vendor/github.com/elastic/beats/filebeat/Makefile
generated
vendored
@@ -21,7 +21,7 @@ kibana:

# Collects all module and dataset fields
.PHONY: fields
-fields:
+fields: python-env
	@mkdir -p _meta/
	@cp ${ES_BEATS}/filebeat/_meta/fields.common.yml _meta/fields.generated.yml
	@${PYTHON_ENV}/bin/python ${ES_BEATS}/metricbeat/scripts/fields_collector.py >> _meta/fields.generated.yml
@@ -62,6 +62,11 @@ imports: python-env
.PHONY: collect
collect: fields kibana modules configs collect-docs imports

+# Creates a new module. Requires the params MODULE
+.PHONY: create-module
+create-module:
+	@go run ${ES_BEATS}/filebeat/scripts/generator/module/main.go --path=$(PWD) --beats_path=$(BEAT_GOPATH)/src/$(BEAT_PATH) --module=$(MODULE)
+
# Creates a new fileset. Requires the params MODULE and FILESET
.PHONY: create-fileset
create-fileset:
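The new `create-module` target mirrors the existing `create-fileset` one: an invocation such as `make create-module MODULE=mymodule` (module name hypothetical) runs the generator under `scripts/generator/module` with the current checkout passed as `--path`.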
12
vendor/github.com/elastic/beats/filebeat/_meta/common.p2.yml
generated
vendored
@@ -1,17 +1,17 @@
# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.

-#=========================== Filebeat prospectors =============================
+#=========================== Filebeat inputs =============================

-filebeat.prospectors:
+filebeat.inputs:

-# Each - is a prospector. Most options can be set at the prospector level, so
-# you can use different prospectors for various configurations.
-# Below are the prospector specific configurations.
+# Each - is an input. Most options can be set at the input level, so
+# you can use different inputs for various configurations.
+# Below are the input specific configurations.

- type: log

-  # Change to true to enable this prospector configuration.
+  # Change to true to enable this input configuration.
  enabled: false

  # Paths that should be crawled and fetched. Glob based paths.
112
vendor/github.com/elastic/beats/filebeat/_meta/common.reference.p2.yml
generated
vendored
@@ -1,22 +1,22 @@
-#=========================== Filebeat prospectors =============================
+#=========================== Filebeat inputs =============================

-# List of prospectors to fetch data.
-filebeat.prospectors:
-# Each - is a prospector. Most options can be set at the prospector level, so
-# you can use different prospectors for various configurations.
-# Below are the prospector specific configurations.
+# List of inputs to fetch data.
+filebeat.inputs:
+# Each - is an input. Most options can be set at the input level, so
+# you can use different inputs for various configurations.
+# Below are the input specific configurations.

# Type of the files. Based on this the way the file is read is decided.
-# The different types cannot be mixed in one prospector
+# The different types cannot be mixed in one input
#
# Possible options are:
# * log: Reads every line of the log file (default)
# * stdin: Reads the standard in

-#------------------------------ Log prospector --------------------------------
+#------------------------------ Log input --------------------------------
- type: log

-  # Change to true to enable this prospector configuration.
+  # Change to true to enable this input configuration.
  enabled: false

  # Paths that should be crawled and fetched. Glob based paths.
@@ -67,7 +67,7 @@ filebeat.prospectors:
  # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
  #ignore_older: 0

-  # How often the prospector checks for new files in the paths that are specified
+  # How often the input checks for new files in the paths that are specified
  # for harvesting. Specify 1s to scan the directory as frequently as possible
  # without causing Filebeat to scan too frequently. Default: 10s.
  #scan_frequency: 10s
@@ -137,7 +137,7 @@ filebeat.prospectors:
  # this can mean that the first entries of a new file are skipped.
  #tail_files: false

-  # The Ingest Node pipeline ID associated with this prospector. If this is set, it
+  # The Ingest Node pipeline ID associated with this input. If this is set, it
  # overwrites the pipeline option from the Elasticsearch output.
  #pipeline:

@@ -203,23 +203,25 @@ filebeat.prospectors:
  # Note: Potential data loss. Make sure to read and understand the docs for this option.
  #close_timeout: 0

-  # Defines if prospectors is enabled
+  # Defines if inputs is enabled
  #enabled: true

-#----------------------------- Stdin prospector -------------------------------
+#----------------------------- Stdin input -------------------------------
# Configuration to use stdin input
#- type: stdin

-#------------------------- Redis slowlog prospector ---------------------------
-# Experimental: Config options for the redis slow log prospector
+#------------------------- Redis slowlog input ---------------------------
+# Experimental: Config options for the redis slow log input
#- type: redis
-  #hosts: ["localhost:6379"]
-  #username:
-  #password:
  #enabled: false

+  # List of hosts to pool to retrieve the slow log information.
+  #hosts: ["localhost:6379"]
+
+  # How often the input checks for redis slow log.
  #scan_frequency: 10s

-  # Timeout after which time the prospector should return an error
+  # Timeout after which time the input should return an error
  #timeout: 1s

  # Network type to be used for redis connection. Default: tcp
@@ -231,17 +233,64 @@ filebeat.prospectors:
  # Redis AUTH password. Empty by default.
  #password: foobared

-#------------------------------ Udp prospector --------------------------------
-# Experimental: Config options for the udp prospector
+#------------------------------ Udp input --------------------------------
+# Experimental: Config options for the udp input
#- type: udp
+  #enabled: false

  # Maximum size of the message received over UDP
-  #max_message_size: 10240
+  #max_message_size: 10KiB

+#------------------------------ TCP input --------------------------------
+# Experimental: Config options for the TCP input
+#- type: tcp
+  #enabled: false
+
+  # The host and port to receive the new event
+  #host: "localhost:9000"
+
+  # Character used to split new message
+  #line_delimiter: "\n"
+
+  # Maximum size in bytes of the message received over TCP
+  #max_message_size: 20MiB
+
+  # The number of seconds of inactivity before a remote connection is closed.
+  #timeout: 300s
+
+#------------------------------ Syslog input --------------------------------
+# Experimental: Config options for the Syslog input
+# Accept RFC3164 formatted syslog event via UDP.
+#- type: syslog
+  #enabled: false
+  #protocol.udp:
+    # The host and port to receive the new event
+    #host: "localhost:9000"
+
+    # Maximum size of the message received over UDP
+    #max_message_size: 10KiB
+
+# Accept RFC3164 formatted syslog event via TCP.
+#- type: syslog
+  #enabled: false
+
+  #protocol.tcp:
+    # The host and port to receive the new event
+    #host: "localhost:9000"
+
+    # Character used to split new message
+    #line_delimiter: "\n"
+
+    # Maximum size in bytes of the message received over TCP
+    #max_message_size: 20MiB
+
+    # The number of seconds of inactivity before a remote connection is closed.
+    #timeout: 300s
+
#========================== Filebeat autodiscover ==============================

# Autodiscover allows you to detect changes in the system and spawn new modules
-# or prospectors as they happen.
+# or inputs as they happen.

#filebeat.autodiscover:
  # List of enabled autodiscover providers
@@ -261,10 +310,15 @@ filebeat.prospectors:
  # data path.
  #filebeat.registry_file: ${path.data}/registry

-# These config files must have the full filebeat config part inside, but only
-# the prospector part is processed. All global options like spool_size are ignored.
-# The config_dir MUST point to a different directory then where the main filebeat config file is in.
-#filebeat.config_dir:
+# The permissions mask to apply on registry file. The default value is 0600.
+# Must be a valid Unix-style file permissions mask expressed in octal notation.
+# This option is not supported on Windows.
+#filebeat.registry_file_permissions: 0600
+
+# By default Ingest pipelines are not updated if a pipeline with the same ID
+# already exists. If this option is enabled Filebeat overwrites pipelines
+# everytime a new Elasticsearch connection is established.
+#filebeat.overwrite_pipelines: false

# How long filebeat waits on shutdown for the publisher to finish.
# Default is 0, not waiting.
@@ -272,9 +326,9 @@ filebeat.prospectors:

# Enable filebeat config reloading
#filebeat.config:
-  #prospectors:
+  #inputs:
  #enabled: false
-  #path: prospectors.d/*.yml
+  #path: inputs.d/*.yml
  #reload.enabled: true
  #reload.period: 10s
  #modules:
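To see how such a commented block translates into a concrete input configuration, here is a minimal Go sketch that builds the new TCP input settings with `common.NewConfigFrom` (the same helper used by the hints builder later in this diff); the values simply mirror the commented defaults above and nothing here is authoritative:

[source,go]
----
package main

import (
	"fmt"

	"github.com/elastic/beats/libbeat/common"
)

func main() {
	// Mirror of the commented TCP input defaults from the reference config.
	cfg, err := common.NewConfigFrom(map[string]interface{}{
		"type":             "tcp",
		"host":             "localhost:9000",
		"line_delimiter":   "\n",
		"max_message_size": "20MiB",
		"timeout":          "300s",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Enabled()) // no "enabled" key set, so this reports the default
}
----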
51
vendor/github.com/elastic/beats/filebeat/_meta/fields.common.yml
generated
vendored
@@ -32,7 +32,14 @@
    - name: prospector.type
      required: true
      description: >
-        The prospector type from which the event was generated. This field is set to the value specified for the `type` option in the prospector section of the Filebeat config file.
+        The input type from which the event was generated. This field is set to the value specified
+        for the `type` option in the input section of the Filebeat config file. (DEPRECATED: see `input.type`)
+
+    - name: input.type
+      required: true
+      description: >
+        The input type from which the event was generated. This field is set to the value specified
+        for the `type` option in the input section of the Filebeat config file.

    - name: read_timestamp
      description: >
@@ -47,3 +54,45 @@
    - name: fileset.name
      description: >
        The Filebeat fileset that generated this event.
+
+    - name: syslog.facility
+      type: long
+      required: false
+      description: >
+        The facility extracted from the priority.
+
+    - name: syslog.priority
+      type: long
+      required: false
+      description: >
+        The priority of the syslog event.
+
+    - name: syslog.severity_label
+      type: keyword
+      required: false
+      description: >
+        The human readable severity.
+
+    - name: syslog.facility_label
+      type: keyword
+      required: false
+      description: >
+        The human readable facility.
+
+    - name: process.program
+      type: keyword
+      required: false
+      description: >
+        The name of the program.
+
+    - name: process.pid
+      type: long
+      required: false
+      description: >
+        The pid of the process.
+
+    - name: event.severity
+      type: long
+      required: false
+      description: >
+        The severity of the event.
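As an illustration of the new syslog fields, a parsed RFC3164 event could carry values like the following (a plain-Go sketch with hypothetical values, not taken from this diff; priority 34 decomposes as facility 4 and severity 2):

[source,go]
----
package main

import "fmt"

func main() {
	// Hypothetical values for the fields declared above.
	event := map[string]interface{}{
		"input.type":            "syslog",
		"syslog.facility":       int64(4),  // extracted from the priority (34 >> 3)
		"syslog.priority":       int64(34), // 8*facility + severity
		"syslog.severity_label": "Critical",
		"syslog.facility_label": "security/authorization",
		"process.program":       "su",
		"process.pid":           int64(1024),
		"event.severity":        int64(2),
	}
	fmt.Println(event)
}
----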
@@ -1,4 +1,4 @@
-package beater
+package autodiscover

import (
	"errors"
@@ -8,17 +8,17 @@ import (
	"github.com/elastic/beats/libbeat/common/bus"
)

-// AutodiscoverAdapter for Filebeat modules & prospectors
+// AutodiscoverAdapter for Filebeat modules & input
type AutodiscoverAdapter struct {
-	prospectorFactory cfgfile.RunnerFactory
+	inputFactory cfgfile.RunnerFactory
	moduleFactory cfgfile.RunnerFactory
}

-// NewAutodiscoverAdapter builds and returns an autodiscover adapter for Filebeat modules & prospectors
-func NewAutodiscoverAdapter(prospectorFactory, moduleFactory cfgfile.RunnerFactory) *AutodiscoverAdapter {
+// NewAutodiscoverAdapter builds and returns an autodiscover adapter for Filebeat modules & input
+func NewAutodiscoverAdapter(inputFactory, moduleFactory cfgfile.RunnerFactory) *AutodiscoverAdapter {
	return &AutodiscoverAdapter{
-		prospectorFactory: prospectorFactory,
+		inputFactory: inputFactory,
		moduleFactory: moduleFactory,
	}
}

@@ -37,12 +37,12 @@ func (m *AutodiscoverAdapter) CheckConfig(c *common.Config) error {
	return nil
}

-// Create a module or prospector from the given config
+// Create a module or input from the given config
func (m *AutodiscoverAdapter) Create(c *common.Config, meta *common.MapStrPointer) (cfgfile.Runner, error) {
	if c.HasField("module") {
		return m.moduleFactory.Create(c, meta)
	}
-	return m.prospectorFactory.Create(c, meta)
+	return m.inputFactory.Create(c, meta)
}

// EventFilter returns the bus filter to retrieve runner start/stop triggering events
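The dispatch rule in `Create` is easy to see with two configs. This is a fragment, not a complete program: the factories and the nginx module name are hypothetical, and `nil` metadata is passed only for brevity.

[source,go]
----
// adapter := NewAutodiscoverAdapter(inputFactory, moduleFactory)

// A config carrying a `module` field is routed to the fileset factory ...
modCfg, _ := common.NewConfigFrom(map[string]interface{}{"module": "nginx"})
adapter.Create(modCfg, nil)

// ... while anything else, such as a docker input, goes to the input factory.
inCfg, _ := common.NewConfigFrom(map[string]interface{}{"type": "docker"})
adapter.Create(inCfg, nil)
----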
24
vendor/github.com/elastic/beats/filebeat/autodiscover/builder/hints/config.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
package hints

import "github.com/elastic/beats/libbeat/common"

type config struct {
	Key    string         `config:"key"`
	Config *common.Config `config:"config"`
}

func defaultConfig() config {
	rawCfg := map[string]interface{}{
		"type": "docker",
		"containers": map[string]interface{}{
			"ids": []string{
				"${data.container.id}",
			},
		},
	}
	cfg, _ := common.NewConfigFrom(rawCfg)
	return config{
		Key:    "logs",
		Config: cfg,
	}
}
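A sketch of how these defaults interact with user settings, using only the calls that appear in this file (`common.NewConfigFrom`, `Unpack`); the overriding key value is hypothetical:

[source,go]
----
// c starts as the default: Key "logs", docker input bound to ${data.container.id}.
c := defaultConfig()

// A user-supplied `key` setting overrides the default on Unpack.
userCfg, _ := common.NewConfigFrom(map[string]interface{}{"key": "custom-logs"})
if err := userCfg.Unpack(&c); err != nil {
	// handle the error; on success c.Key is now "custom-logs" while
	// c.Config still holds the docker container template.
}
----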
183
vendor/github.com/elastic/beats/filebeat/autodiscover/builder/hints/logs.go
generated
vendored
Normal file
183
vendor/github.com/elastic/beats/filebeat/autodiscover/builder/hints/logs.go
generated
vendored
Normal file
@@ -0,0 +1,183 @@
package hints

import (
	"fmt"
	"regexp"

	"github.com/elastic/beats/filebeat/fileset"
	"github.com/elastic/beats/libbeat/autodiscover"
	"github.com/elastic/beats/libbeat/autodiscover/builder"
	"github.com/elastic/beats/libbeat/autodiscover/template"
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/common/bus"
	"github.com/elastic/beats/libbeat/common/cfgwarn"
	"github.com/elastic/beats/libbeat/logp"
)

func init() {
	autodiscover.Registry.AddBuilder("hints", NewLogHints)
}

const (
	multiline    = "multiline"
	includeLines = "include_lines"
	excludeLines = "exclude_lines"
)

// validModuleNames is used to sanitize user input
var validModuleNames = regexp.MustCompile("[^a-zA-Z0-9]+")

type logHints struct {
	Key      string
	Config   *common.Config
	Registry *fileset.ModuleRegistry
}

// NewLogHints builds a log hints builder
func NewLogHints(cfg *common.Config) (autodiscover.Builder, error) {
	cfgwarn.Beta("The hints builder is beta")
	config := defaultConfig()
	err := cfg.Unpack(&config)
	if err != nil {
		return nil, fmt.Errorf("unable to unpack hints config due to error: %v", err)
	}

	moduleRegistry, err := fileset.NewModuleRegistry([]*common.Config{}, "", false)
	if err != nil {
		return nil, err
	}

	return &logHints{config.Key, config.Config, moduleRegistry}, nil
}

// CreateConfig builds input configs from the hints in the bus event
func (l *logHints) CreateConfig(event bus.Event) []*common.Config {
	// Clone original config
	config, _ := common.NewConfigFrom(l.Config)
	host, _ := event["host"].(string)
	if host == "" {
		return []*common.Config{}
	}

	var hints common.MapStr
	hIface, ok := event["hints"]
	if ok {
		hints, _ = hIface.(common.MapStr)
	}

	if builder.IsNoOp(hints, l.Key) {
		return []*common.Config{config}
	}

	tempCfg := common.MapStr{}
	mline := l.getMultiline(hints)
	if len(mline) != 0 {
		tempCfg.Put(multiline, mline)
	}
	if ilines := l.getIncludeLines(hints); len(ilines) != 0 {
		tempCfg.Put(includeLines, ilines)
	}
	if elines := l.getExcludeLines(hints); len(elines) != 0 {
		tempCfg.Put(excludeLines, elines)
	}

	// Merge config template with the configs from the annotations
	if err := config.Merge(tempCfg); err != nil {
		logp.Debug("hints.builder", "config merge failed with error: %v", err)
		return []*common.Config{config}
	}

	module := l.getModule(hints)
	if module != "" {
		moduleConf := map[string]interface{}{
			"module": module,
		}

		filesets := l.getFilesets(hints, module)
		for fileset, conf := range filesets {
			filesetConf, _ := common.NewConfigFrom(config)
			filesetConf.SetString("containers.stream", -1, conf.Stream)

			moduleConf[fileset+".enabled"] = conf.Enabled
			moduleConf[fileset+".input"] = filesetConf

			logp.Debug("hints.builder", "generated config %+v", moduleConf)
		}
		config, _ = common.NewConfigFrom(moduleConf)
	}

	logp.Debug("hints.builder", "generated config %+v", config)

	// Apply information in event to the template to generate the final config
	return template.ApplyConfigTemplate(event, []*common.Config{config})
}

func (l *logHints) getMultiline(hints common.MapStr) common.MapStr {
	return builder.GetHintMapStr(hints, l.Key, multiline)
}

func (l *logHints) getIncludeLines(hints common.MapStr) []string {
	return builder.GetHintAsList(hints, l.Key, includeLines)
}

func (l *logHints) getExcludeLines(hints common.MapStr) []string {
	return builder.GetHintAsList(hints, l.Key, excludeLines)
}

func (l *logHints) getModule(hints common.MapStr) string {
	module := builder.GetHintString(hints, l.Key, "module")
	// for security, strip anything but alphanumerics from the module name
	return validModuleNames.ReplaceAllString(module, "")
}

type filesetConfig struct {
	Enabled bool
	Stream  string
}

// getFilesets returns a map of fileset -> enabled & stream (stdout, stderr, all)
func (l *logHints) getFilesets(hints common.MapStr, module string) map[string]*filesetConfig {
	var configured bool
	filesets := make(map[string]*filesetConfig)

	moduleFilesets, err := l.Registry.ModuleFilesets(module)
	if err != nil {
		logp.Err("Error retrieving module filesets: %+v", err)
		return nil
	}

	for _, fileset := range moduleFilesets {
		filesets[fileset] = &filesetConfig{Enabled: false, Stream: "all"}
	}

	// If a single fileset is given, pass all streams to it
	fileset := builder.GetHintString(hints, l.Key, "fileset")
	if fileset != "" {
		if conf, ok := filesets[fileset]; ok {
			conf.Enabled = true
			configured = true
		}
	}

	// If filesets are defined per stream, enable each of them
	for _, stream := range []string{"all", "stdout", "stderr"} {
		fileset := builder.GetHintString(hints, l.Key, "fileset."+stream)
		if fileset != "" {
			if conf, ok := filesets[fileset]; ok {
				conf.Enabled = true
				conf.Stream = stream
				configured = true
			}
		}
	}

	// No fileset defined: return defaults for the module, all streams to all filesets
	if !configured {
		for _, conf := range filesets {
			conf.Enabled = true
		}
	}

	return filesets
}
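To make the flow concrete, here is a sketch of the bus event a provider would hand to CreateConfig for a container annotated with multiline hints. The annotation spelling in the comment (the co.elastic.logs/ prefix) is the usual hints convention and is an assumption here, not something defined in this file.

package main

import (
	"fmt"

	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/common/bus"
)

func main() {
	// Roughly what a Kubernetes pod annotated with (assumed spelling)
	//   co.elastic.logs/multiline.pattern: '^\['
	//   co.elastic.logs/multiline.negate:  'true'
	// surfaces as once the provider has parsed the annotations.
	event := bus.Event{
		"host": "1.2.3.4",
		"hints": common.MapStr{
			"logs": common.MapStr{
				"multiline": common.MapStr{
					"pattern": `^\[`,
					"negate":  "true",
				},
			},
		},
	}

	// logHints.CreateConfig(event) would merge this block into the docker
	// input template; the tests below walk through the exact output.
	fmt.Printf("%+v\n", event)
}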
295
vendor/github.com/elastic/beats/filebeat/autodiscover/builder/hints/logs_test.go
generated
vendored
Normal file
@@ -0,0 +1,295 @@
package hints

import (
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/common/bus"
	"github.com/elastic/beats/libbeat/paths"
)

func TestGenerateHints(t *testing.T) {
	tests := []struct {
		msg    string
		event  bus.Event
		len    int
		result common.MapStr
	}{
		{
			msg: "Hints without host should return nothing",
			event: bus.Event{
				"hints": common.MapStr{
					"metrics": common.MapStr{
						"module": "prometheus",
					},
				},
			},
			len:    0,
			result: common.MapStr{},
		},
		{
			msg: "Empty event hints should return default config",
			event: bus.Event{
				"host": "1.2.3.4",
				"kubernetes": common.MapStr{
					"container": common.MapStr{
						"name": "foobar",
						"id":   "abc",
					},
				},
				"container": common.MapStr{
					"name": "foobar",
					"id":   "abc",
				},
			},
			len: 1,
			result: common.MapStr{
				"type": "docker",
				"containers": map[string]interface{}{
					"ids": []interface{}{"abc"},
				},
			},
		},
		{
			msg: "Hint with include|exclude_lines must be part of the input config",
			event: bus.Event{
				"host": "1.2.3.4",
				"kubernetes": common.MapStr{
					"container": common.MapStr{
						"name": "foobar",
						"id":   "abc",
					},
				},
				"container": common.MapStr{
					"name": "foobar",
					"id":   "abc",
				},
				"hints": common.MapStr{
					"logs": common.MapStr{
						"include_lines": "^test, ^test1",
						"exclude_lines": "^test2, ^test3",
					},
				},
			},
			len: 1,
			result: common.MapStr{
				"type": "docker",
				"containers": map[string]interface{}{
					"ids": []interface{}{"abc"},
				},
				"include_lines": []interface{}{"^test", "^test1"},
				"exclude_lines": []interface{}{"^test2", "^test3"},
			},
		},
		{
			msg: "Hint with multiline config must have a multiline in the input config",
			event: bus.Event{
				"host": "1.2.3.4",
				"kubernetes": common.MapStr{
					"container": common.MapStr{
						"name": "foobar",
						"id":   "abc",
					},
				},
				"container": common.MapStr{
					"name": "foobar",
					"id":   "abc",
				},
				"hints": common.MapStr{
					"logs": common.MapStr{
						"multiline": common.MapStr{
							"pattern": "^test",
							"negate":  "true",
						},
					},
				},
			},
			len: 1,
			result: common.MapStr{
				"type": "docker",
				"containers": map[string]interface{}{
					"ids": []interface{}{"abc"},
				},
				"multiline": map[string]interface{}{
					"pattern": "^test",
					"negate":  "true",
				},
			},
		},
		{
			msg: "Hint with module should attach input to its filesets",
			event: bus.Event{
				"host": "1.2.3.4",
				"kubernetes": common.MapStr{
					"container": common.MapStr{
						"name": "foobar",
						"id":   "abc",
					},
				},
				"container": common.MapStr{
					"name": "foobar",
					"id":   "abc",
				},
				"hints": common.MapStr{
					"logs": common.MapStr{
						"module": "apache2",
					},
				},
			},
			len: 1,
			result: common.MapStr{
				"module": "apache2",
				"error": map[string]interface{}{
					"enabled": true,
					"input": map[string]interface{}{
						"type": "docker",
						"containers": map[string]interface{}{
							"stream": "all",
							"ids":    []interface{}{"abc"},
						},
					},
				},
				"access": map[string]interface{}{
					"enabled": true,
					"input": map[string]interface{}{
						"type": "docker",
						"containers": map[string]interface{}{
							"stream": "all",
							"ids":    []interface{}{"abc"},
						},
					},
				},
			},
		},
		{
			msg: "Hint with module should honor defined filesets",
			event: bus.Event{
				"host": "1.2.3.4",
				"kubernetes": common.MapStr{
					"container": common.MapStr{
						"name": "foobar",
						"id":   "abc",
					},
				},
				"container": common.MapStr{
					"name": "foobar",
					"id":   "abc",
				},
				"hints": common.MapStr{
					"logs": common.MapStr{
						"module":  "apache2",
						"fileset": "access",
					},
				},
			},
			len: 1,
			result: common.MapStr{
				"module": "apache2",
				"access": map[string]interface{}{
					"enabled": true,
					"input": map[string]interface{}{
						"type": "docker",
						"containers": map[string]interface{}{
							"stream": "all",
							"ids":    []interface{}{"abc"},
						},
					},
				},
				"error": map[string]interface{}{
					"enabled": false,
					"input": map[string]interface{}{
						"type": "docker",
						"containers": map[string]interface{}{
							"stream": "all",
							"ids":    []interface{}{"abc"},
						},
					},
				},
			},
		},
		{
			msg: "Hint with module should honor defined filesets with streams",
			event: bus.Event{
				"host": "1.2.3.4",
				"kubernetes": common.MapStr{
					"container": common.MapStr{
						"name": "foobar",
						"id":   "abc",
					},
				},
				"container": common.MapStr{
					"name": "foobar",
					"id":   "abc",
				},
				"hints": common.MapStr{
					"logs": common.MapStr{
						"module":         "apache2",
						"fileset.stdout": "access",
						"fileset.stderr": "error",
					},
				},
			},
			len: 1,
			result: common.MapStr{
				"module": "apache2",
				"access": map[string]interface{}{
					"enabled": true,
					"input": map[string]interface{}{
						"type": "docker",
						"containers": map[string]interface{}{
							"stream": "stdout",
							"ids":    []interface{}{"abc"},
						},
					},
				},
				"error": map[string]interface{}{
					"enabled": true,
					"input": map[string]interface{}{
						"type": "docker",
						"containers": map[string]interface{}{
							"stream": "stderr",
							"ids":    []interface{}{"abc"},
						},
					},
				},
			},
		},
	}

	for _, test := range tests {
		cfg, _ := common.NewConfigFrom(map[string]interface{}{
			"type": "docker",
			"containers": map[string]interface{}{
				"ids": []string{
					"${data.container.id}",
				},
			},
		})

		// Configure path for modules access
		abs, _ := filepath.Abs("../../..")
		err := paths.InitPaths(&paths.Path{
			Home: abs,
		})
		if err != nil {
			t.Fatal(err)
		}

		l, err := NewLogHints(cfg)
		if err != nil {
			t.Fatal(err)
		}

		cfgs := l.CreateConfig(test.event)
		assert.Equal(t, len(cfgs), test.len, test.msg)

		if test.len != 0 {
			config := common.MapStr{}
			err := cfgs[0].Unpack(&config)
			assert.Nil(t, err, test.msg)

			assert.Equal(t, test.result, config, test.msg)
		}
	}
}
6
vendor/github.com/elastic/beats/filebeat/autodiscover/include.go
generated
vendored
Normal file
@@ -0,0 +1,6 @@
package autodiscover

import (
	// include all filebeat specific builders
	_ "github.com/elastic/beats/filebeat/autodiscover/builder/hints"
)
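This file relies on Go's blank-import side effect: the hints package registers itself with the autodiscover registry in its init() (shown in logs.go above), so importing it for its side effects alone is enough to make the builder available. A self-contained sketch of the same pattern, with a toy registry standing in for autodiscover.Registry:

package main

import "fmt"

// registry stands in for autodiscover.Registry in this sketch.
var registry = map[string]func(){}

// A real package does this in its own init(); the blank import of that
// package is what triggers the registration.
func init() {
	registry["hints"] = func() { fmt.Println("hints builder selected") }
}

func main() {
	registry["hints"]()
}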
28
vendor/github.com/elastic/beats/filebeat/beater/acker.go
generated
vendored
@@ -2,31 +2,41 @@ package beater

 import (
 	"github.com/elastic/beats/filebeat/input/file"
+	"github.com/elastic/beats/libbeat/logp"
 )

 // eventACKer handles publisher pipeline ACKs and forwards
-// them to the registrar.
+// them to the registrar or directly to the stateless logger.
 type eventACKer struct {
-	out successLogger
+	stateful  statefulLogger
+	stateless statelessLogger
+	log       *logp.Logger
 }

-type successLogger interface {
+type statefulLogger interface {
 	Published(states []file.State)
 }

-func newEventACKer(out successLogger) *eventACKer {
-	return &eventACKer{out: out}
+type statelessLogger interface {
+	Published(c int) bool
+}
+
+func newEventACKer(stateless statelessLogger, stateful statefulLogger) *eventACKer {
+	return &eventACKer{stateless: stateless, stateful: stateful, log: logp.NewLogger("acker")}
 }

 func (a *eventACKer) ackEvents(data []interface{}) {
+	stateless := 0
 	states := make([]file.State, 0, len(data))
 	for _, datum := range data {
 		if datum == nil {
+			stateless++
 			continue
 		}

 		st, ok := datum.(file.State)
 		if !ok {
+			stateless++
 			continue
 		}

@@ -34,6 +44,12 @@ func (a *eventACKer) ackEvents(data []interface{}) {
 	}

 	if len(states) > 0 {
-		a.out.Published(states)
+		a.log.Debugw("stateful ack", "count", len(states))
+		a.stateful.Published(states)
 	}
+
+	if stateless > 0 {
+		a.log.Debugw("stateless ack", "count", stateless)
+		a.stateless.Published(stateless)
+	}
 }
71
vendor/github.com/elastic/beats/filebeat/beater/acker_test.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
package beater

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/elastic/beats/filebeat/input/file"
)

type mockStatefulLogger struct {
	states []file.State
}

func (sf *mockStatefulLogger) Published(states []file.State) {
	sf.states = states
}

type mockStatelessLogger struct {
	count int
}

func (sl *mockStatelessLogger) Published(count int) bool {
	sl.count = count
	return true
}

func TestACKer(t *testing.T) {
	tests := []struct {
		name      string
		data      []interface{}
		stateless int
		stateful  []file.State
	}{
		{
			name:      "only stateless",
			data:      []interface{}{nil, nil},
			stateless: 2,
		},
		{
			name:      "only stateful",
			data:      []interface{}{file.State{Source: "-"}, file.State{Source: "-"}},
			stateful:  []file.State{{Source: "-"}, {Source: "-"}},
			stateless: 0,
		},
		{
			name:      "both",
			data:      []interface{}{file.State{Source: "-"}, nil, file.State{Source: "-"}},
			stateful:  []file.State{{Source: "-"}, {Source: "-"}},
			stateless: 1,
		},
		{
			name:      "any other Private type",
			data:      []interface{}{struct{}{}, nil},
			stateless: 2,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			sl := &mockStatelessLogger{}
			sf := &mockStatefulLogger{}

			h := newEventACKer(sl, sf)

			h.ackEvents(test.data)
			assert.Equal(t, test.stateless, sl.count)
			assert.Equal(t, test.stateful, sf.states)
		})
	}
}
149
vendor/github.com/elastic/beats/filebeat/beater/filebeat.go
generated
vendored
@@ -3,6 +3,7 @@ package beater
 import (
 	"flag"
 	"fmt"
+	"strings"

 	"github.com/joeshaw/multierror"
 	"github.com/pkg/errors"
@@ -15,7 +16,9 @@ import (
 	"github.com/elastic/beats/libbeat/logp"
 	"github.com/elastic/beats/libbeat/monitoring"
 	"github.com/elastic/beats/libbeat/outputs/elasticsearch"
+	"github.com/elastic/beats/libbeat/setup/kibana"
+
+	fbautodiscover "github.com/elastic/beats/filebeat/autodiscover"
 	"github.com/elastic/beats/filebeat/channel"
 	cfg "github.com/elastic/beats/filebeat/config"
 	"github.com/elastic/beats/filebeat/crawler"
@@ -54,6 +57,22 @@ func New(b *beat.Beat, rawConfig *common.Config) (beat.Beater, error) {
 		return nil, err
 	}

+	if len(config.Prospectors) > 0 {
+		cfgwarn.Deprecate("7.0.0", "prospectors are deprecated, use `inputs` instead.")
+		if len(config.Inputs) > 0 {
+			return nil, fmt.Errorf("prospectors and inputs used in the configuration file, define only inputs not both")
+		}
+		config.Inputs = config.Prospectors
+	}
+
+	if config.ConfigProspector != nil {
+		cfgwarn.Deprecate("7.0.0", "config.prospectors are deprecated, use `config.inputs` instead.")
+		if config.ConfigInput != nil {
+			return nil, fmt.Errorf("config.prospectors and config.inputs used in the configuration file, define only config.inputs not both")
+		}
+		config.ConfigInput = config.ConfigProspector
+	}
+
 	moduleRegistry, err := fileset.NewModuleRegistry(config.Modules, b.Info.Version, true)
 	if err != nil {
 		return nil, err
@@ -62,7 +81,7 @@ func New(b *beat.Beat, rawConfig *common.Config) (beat.Beater, error) {
 		logp.Info("Enabled modules/filesets: %s", moduleRegistry.InfoString())
 	}

-	moduleProspectors, err := moduleRegistry.GetProspectorConfigs()
+	moduleInputs, err := moduleRegistry.GetInputConfigs()
 	if err != nil {
 		return nil, err
 	}
@@ -71,28 +90,30 @@ func New(b *beat.Beat, rawConfig *common.Config) (beat.Beater, error) {
 		return nil, err
 	}

-	// Add prospectors created by the modules
-	config.Prospectors = append(config.Prospectors, moduleProspectors...)
+	// Add inputs created by the modules
+	config.Inputs = append(config.Inputs, moduleInputs...)

-	haveEnabledProspectors := false
-	for _, prospector := range config.Prospectors {
-		if prospector.Enabled() {
-			haveEnabledProspectors = true
-			break
-		}
-	}
+	enabledInputs := config.ListEnabledInputs()
+	var haveEnabledInputs bool
+	if len(enabledInputs) > 0 {
+		haveEnabledInputs = true
+	}

-	if !config.ConfigProspector.Enabled() && !config.ConfigModules.Enabled() && !haveEnabledProspectors && config.Autodiscover == nil {
+	if !config.ConfigInput.Enabled() && !config.ConfigModules.Enabled() && !haveEnabledInputs && config.Autodiscover == nil {
 		if !b.InSetupCmd {
-			return nil, errors.New("No modules or prospectors enabled and configuration reloading disabled. What files do you want me to watch?")
+			return nil, errors.New("no modules or inputs enabled and configuration reloading disabled. What files do you want me to watch?")
 		}

 		// in the `setup` command, log this only as a warning
 		logp.Warn("Setup called, but no modules enabled.")
 	}

-	if *once && config.ConfigProspector.Enabled() && config.ConfigModules.Enabled() {
-		return nil, errors.New("prospector configs and -once cannot be used together")
+	if *once && config.ConfigInput.Enabled() && config.ConfigModules.Enabled() {
+		return nil, errors.New("input configs and -once cannot be used together")
+	}
+
+	if config.IsInputEnabled("stdin") && len(enabledInputs) > 1 {
+		return nil, fmt.Errorf("stdin must run in exclusive mode, configured inputs: %s", strings.Join(enabledInputs, ", "))
 	}

 	fb := &Filebeat{
@@ -102,12 +123,36 @@ func New(b *beat.Beat, rawConfig *common.Config) (beat.Beater, error) {
 	}

 	// register `setup` callback for ML jobs
-	b.SetupMLCallback = func(b *beat.Beat) error {
-		return fb.loadModulesML(b)
+	b.SetupMLCallback = func(b *beat.Beat, kibanaConfig *common.Config) error {
+		return fb.loadModulesML(b, kibanaConfig)
 	}
+
+	err = fb.setupPipelineLoaderCallback(b)
+	if err != nil {
+		return nil, err
+	}
+
 	return fb, nil
 }

+func (fb *Filebeat) setupPipelineLoaderCallback(b *beat.Beat) error {
+	if !fb.moduleRegistry.Empty() {
+		overwritePipelines := fb.config.OverwritePipelines
+		if b.InSetupCmd {
+			overwritePipelines = true
+		}
+
+		b.OverwritePipelinesCallback = func(esConfig *common.Config) error {
+			esClient, err := elasticsearch.NewConnectedClient(esConfig)
+			if err != nil {
+				return err
+			}
+			return fb.moduleRegistry.LoadPipelines(esClient, overwritePipelines)
+		}
+	}
+	return nil
+}
+
 // loadModulesPipelines is called when modules are configured to do the initial
 // setup.
 func (fb *Filebeat) loadModulesPipelines(b *beat.Beat) error {
@@ -116,20 +161,26 @@ func (fb *Filebeat) loadModulesPipelines(b *beat.Beat) error {
 		return nil
 	}

+	overwritePipelines := fb.config.OverwritePipelines
+	if b.InSetupCmd {
+		overwritePipelines = true
+	}
+
 	// register pipeline loading to happen every time a new ES connection is
 	// established
 	callback := func(esClient *elasticsearch.Client) error {
-		return fb.moduleRegistry.LoadPipelines(esClient)
+		return fb.moduleRegistry.LoadPipelines(esClient, overwritePipelines)
 	}
 	elasticsearch.RegisterConnectCallback(callback)

 	return nil
 }

-func (fb *Filebeat) loadModulesML(b *beat.Beat) error {
-	logp.Debug("machine-learning", "Setting up ML jobs for modules")
+func (fb *Filebeat) loadModulesML(b *beat.Beat, kibanaConfig *common.Config) error {
 	var errs multierror.Errors

+	logp.Debug("machine-learning", "Setting up ML jobs for modules")
+
 	if b.Config.Output.Name() != "elasticsearch" {
 		logp.Warn("Filebeat is unable to load the Xpack Machine Learning configurations for the" +
 			" modules because the Elasticsearch output is not configured/enabled.")
@@ -141,7 +192,34 @@ func (fb *Filebeat) loadModulesML(b *beat.Beat) error {
 	if err != nil {
 		return errors.Errorf("Error creating Elasticsearch client: %v", err)
 	}
-	if err := fb.moduleRegistry.LoadML(esClient); err != nil {
+
+	if kibanaConfig == nil {
+		kibanaConfig = common.NewConfig()
+	}
+
+	if esConfig.Enabled() {
+		username, _ := esConfig.String("username", -1)
+		password, _ := esConfig.String("password", -1)
+
+		if !kibanaConfig.HasField("username") && username != "" {
+			kibanaConfig.SetString("username", -1, username)
+		}
+		if !kibanaConfig.HasField("password") && password != "" {
+			kibanaConfig.SetString("password", -1, password)
+		}
+	}
+
+	kibanaClient, err := kibana.NewKibanaClient(kibanaConfig)
+	if err != nil {
+		return errors.Errorf("Error creating Kibana client: %v", err)
+	}
+
+	kibanaVersion, err := common.NewVersion(kibanaClient.GetVersion())
+	if err != nil {
+		return errors.Errorf("Error checking Kibana version: %v", err)
+	}
+
+	if err := setupMLBasedOnVersion(fb.moduleRegistry, esClient, kibanaClient, kibanaVersion); err != nil {
 		errs = append(errs, err)
 	}

@@ -167,15 +245,30 @@ func (fb *Filebeat) loadModulesML(b *beat.Beat) error {
 			continue
 		}

-		if err := set.LoadML(esClient); err != nil {
+		if err := setupMLBasedOnVersion(set, esClient, kibanaClient, kibanaVersion); err != nil {
 			errs = append(errs, err)
 		}

 		}
 	}

 	return errs.Err()
 }

+func setupMLBasedOnVersion(reg *fileset.ModuleRegistry, esClient *elasticsearch.Client, kibanaClient *kibana.Client, kibanaVersion *common.Version) error {
+	if isElasticsearchLoads(kibanaVersion) {
+		return reg.LoadML(esClient)
+	}
+	return reg.SetupML(esClient, kibanaClient)
+}
+
+func isElasticsearchLoads(kibanaVersion *common.Version) bool {
+	if kibanaVersion.Major < 6 || kibanaVersion.Major == 6 && kibanaVersion.Minor < 1 {
+		return true
+	}
+	return false
+}
+
 // Run allows the beater to be run as a beat.
 func (fb *Filebeat) Run(b *beat.Beat) error {
 	var err error
@@ -200,7 +293,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error {
 	finishedLogger := newFinishedLogger(wgEvents)

 	// Setup registrar to persist state
-	registrar, err := registrar.New(config.RegistryFile, config.RegistryFlush, finishedLogger)
+	registrar, err := registrar.New(config.RegistryFile, config.RegistryFilePermissions, config.RegistryFlush, finishedLogger)
 	if err != nil {
 		logp.Err("Could not init registrar: %v", err)
 		return err
@@ -210,7 +303,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error {
 	registrarChannel := newRegistrarLogger(registrar)

 	err = b.Publisher.SetACKHandler(beat.PipelineACKHandler{
-		ACKEvents: newEventACKer(registrarChannel).ackEvents,
+		ACKEvents: newEventACKer(finishedLogger, registrarChannel).ackEvents,
 	})
 	if err != nil {
 		logp.Err("Failed to install the registry with the publisher pipeline: %v", err)
@@ -220,7 +313,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error {
 	outDone := make(chan struct{}) // outDone closes down all active pipeline connections
 	crawler, err := crawler.New(
 		channel.NewOutletFactory(outDone, b.Publisher, wgEvents).Create,
-		config.Prospectors,
+		config.Inputs,
 		b.Info.Version,
 		fb.done,
 		*once)
@@ -261,7 +354,11 @@ func (fb *Filebeat) Run(b *beat.Beat) error {
 		logp.Warn(pipelinesWarning)
 	}

-	err = crawler.Start(registrar, config.ConfigProspector, config.ConfigModules, pipelineLoaderFactory)
+	if config.OverwritePipelines {
+		logp.Debug("modules", "Existing Ingest pipelines will be updated")
+	}
+
+	err = crawler.Start(registrar, config.ConfigInput, config.ConfigModules, pipelineLoaderFactory, config.OverwritePipelines)
 	if err != nil {
 		crawler.Stop()
 		return err
@@ -279,7 +376,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error {

 	var adiscover *autodiscover.Autodiscover
 	if fb.config.Autodiscover != nil {
-		adapter := NewAutodiscoverAdapter(crawler.ProspectorsFactory, crawler.ModulesFactory)
+		adapter := fbautodiscover.NewAutodiscoverAdapter(crawler.InputsFactory, crawler.ModulesFactory)
 		adiscover, err = autodiscover.NewAutodiscover("filebeat", adapter, config.Autodiscover)
 		if err != nil {
 			return err
@@ -291,7 +388,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error {
 	waitFinished.AddChan(fb.done)
 	waitFinished.Wait()

-	// Stop autodiscover -> Stop crawler -> stop prospectors -> stop harvesters
+	// Stop autodiscover -> Stop crawler -> stop inputs -> stop harvesters
 	// Note: waiting for crawlers to stop here in order to install wgEvents.Wait
 	// after all events have been enqueued for publishing. Otherwise wgEvents.Wait
 	// or publisher might panic due to concurrent updates.
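The version gate added above is easy to check in isolation: anything below Kibana 6.1 keeps the legacy path where Elasticsearch loads the ML jobs directly, while 6.1+ goes through the Kibana client. A small sketch; the helper is redeclared locally to mirror the one in filebeat.go, and the version strings are arbitrary examples.

package main

import (
	"fmt"

	"github.com/elastic/beats/libbeat/common"
)

// isElasticsearchLoads mirrors the helper added in filebeat.go above.
func isElasticsearchLoads(v *common.Version) bool {
	return v.Major < 6 || v.Major == 6 && v.Minor < 1
}

func main() {
	for _, s := range []string{"5.6.9", "6.0.2", "6.1.0", "6.3.0"} {
		v, err := common.NewVersion(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("kibana %s -> elasticsearch loads ML: %v\n", s, isElasticsearchLoads(v))
	}
}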
17
vendor/github.com/elastic/beats/filebeat/channel/factory.go
generated
vendored
@@ -24,15 +24,15 @@ type clientEventer struct {
 	wgEvents eventCounter
 }

-// prospectorOutletConfig defines common prospector settings
+// inputOutletConfig defines common input settings
 // for the publisher pipeline.
-type prospectorOutletConfig struct {
+type inputOutletConfig struct {
 	// event processing
 	common.EventMetadata `config:",inline"` // Fields and tags to add to events.
 	Processors processors.PluginConfig `config:"processors"`

 	// implicit event fields
-	Type string `config:"type"` // prospector.type
+	Type string `config:"type"` // input.type

 	// hidden filebeat modules settings
 	Module string `config:"_module_name"` // hidden setting
@@ -44,7 +44,7 @@ type inputOutletConfig struct {
 }

 // NewOutletFactory creates a new outlet factory for
-// connecting a prospector to the publisher pipeline.
+// connecting an input to the publisher pipeline.
 func NewOutletFactory(
 	done <-chan struct{},
 	pipeline beat.Pipeline,
@@ -63,12 +63,12 @@ func NewOutletFactory(
 	return o
 }

-// Create builds a new Outleter, while applying common prospector settings.
-// Prospectors and all harvesters use the same pipeline client instance.
+// Create builds a new Outleter, while applying common input settings.
+// Inputs and all harvesters use the same pipeline client instance.
 // This guarantees ordering between events as required by the registrar for
 // file.State updates
 func (f *OutletFactory) Create(cfg *common.Config, dynFields *common.MapStrPointer) (Outleter, error) {
-	config := prospectorOutletConfig{}
+	config := inputOutletConfig{}
 	if err := cfg.Unpack(&config); err != nil {
 		return nil, err
 	}
@@ -99,6 +99,9 @@ func (f *OutletFactory) Create(cfg *common.Config, dynFields *common.MapStrPoint
 		fields["prospector"] = common.MapStr{
 			"type": config.Type,
 		}
+		fields["input"] = common.MapStr{
+			"type": config.Type,
+		}
 	}

 	client, err := f.pipeline.ConnectWith(beat.ClientConfig{
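The last hunk keeps the deprecated prospector.type event field alongside the new input.type during the rename, so existing dashboards and queries keep matching. A sketch of the resulting event fields; the value is illustrative and would normally come from inputOutletConfig.Type.

package main

import (
	"fmt"

	"github.com/elastic/beats/libbeat/common"
)

func main() {
	typ := "log" // would come from inputOutletConfig.Type

	fields := common.MapStr{}
	fields["prospector"] = common.MapStr{"type": typ} // deprecated spelling
	fields["input"] = common.MapStr{"type": typ}      // new spelling

	fmt.Println(fields) // both keys carry the same input type
}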
2
vendor/github.com/elastic/beats/filebeat/channel/interface.go
generated
vendored
@@ -8,7 +8,7 @@ import (
 // Factory is used to create a new Outlet instance
 type Factory func(*common.Config, *common.MapStrPointer) (Outleter, error)

-// Outleter is the outlet for a prospector
+// Outleter is the outlet for an input
 type Outleter interface {
 	Close() error
 	OnEvent(data *util.Data) bool
68
vendor/github.com/elastic/beats/filebeat/config/config.go
generated
vendored
@@ -5,6 +5,7 @@ import (
 	"log"
 	"os"
 	"path/filepath"
+	"sort"
 	"time"

 	"github.com/elastic/beats/libbeat/autodiscover"
@@ -21,21 +22,27 @@ const (
 )

 type Config struct {
-	Prospectors      []*common.Config     `config:"prospectors"`
-	RegistryFile     string               `config:"registry_file"`
-	RegistryFlush    time.Duration        `config:"registry_flush"`
-	ConfigDir        string               `config:"config_dir"`
-	ShutdownTimeout  time.Duration        `config:"shutdown_timeout"`
-	Modules          []*common.Config     `config:"modules"`
-	ConfigProspector *common.Config       `config:"config.prospectors"`
-	ConfigModules    *common.Config       `config:"config.modules"`
-	Autodiscover     *autodiscover.Config `config:"autodiscover"`
+	Inputs                  []*common.Config     `config:"inputs"`
+	Prospectors             []*common.Config     `config:"prospectors"`
+	RegistryFile            string               `config:"registry_file"`
+	RegistryFilePermissions os.FileMode          `config:"registry_file_permissions"`
+	RegistryFlush           time.Duration        `config:"registry_flush"`
+	ConfigDir               string               `config:"config_dir"`
+	ShutdownTimeout         time.Duration        `config:"shutdown_timeout"`
+	Modules                 []*common.Config     `config:"modules"`
+	ConfigInput             *common.Config       `config:"config.inputs"`
+	ConfigProspector        *common.Config       `config:"config.prospectors"`
+	ConfigModules           *common.Config       `config:"config.modules"`
+	Autodiscover            *autodiscover.Config `config:"autodiscover"`
+	OverwritePipelines      bool                 `config:"overwrite_pipelines"`
 }

 var (
 	DefaultConfig = Config{
-		RegistryFile:    "registry",
-		ShutdownTimeout: 0,
+		RegistryFile:            "registry",
+		RegistryFilePermissions: 0600,
+		ShutdownTimeout:         0,
+		OverwritePipelines:      false,
 	}
 )

@@ -82,7 +89,15 @@ func mergeConfigFiles(configFiles []string, config *Config) error {
 			return fmt.Errorf("Failed to read %s: %s", file, err)
 		}

-		config.Prospectors = append(config.Prospectors, tmpConfig.Filebeat.Prospectors...)
+		if len(tmpConfig.Filebeat.Prospectors) > 0 {
+			cfgwarn.Deprecate("7.0.0", "prospectors are deprecated, use `inputs` instead.")
+			if len(tmpConfig.Filebeat.Inputs) > 0 {
+				return fmt.Errorf("prospectors and inputs used in the configuration file, define only inputs not both")
+			}
+			tmpConfig.Filebeat.Inputs = append(tmpConfig.Filebeat.Inputs, tmpConfig.Filebeat.Prospectors...)
+		}
+
+		config.Inputs = append(config.Inputs, tmpConfig.Filebeat.Inputs...)
 	}

 	return nil
@@ -97,7 +112,7 @@ func (config *Config) FetchConfigs() error {
 		return nil
 	}

-	cfgwarn.Deprecate("7.0.0", "config_dir is deprecated. Use `filebeat.config.prospectors` instead.")
+	cfgwarn.Deprecate("7.0.0", "config_dir is deprecated. Use `filebeat.config.inputs` instead.")

 	// If configDir is relative, consider it relative to the config path
 	configDir = paths.Resolve(paths.Config, configDir)
@@ -120,3 +135,30 @@ func (config *Config) FetchConfigs() error {

 	return nil
 }
+
+// ListEnabledInputs returns a list of enabled inputs sorted in alphabetical order.
+func (config *Config) ListEnabledInputs() []string {
+	t := struct {
+		Type string `config:"type"`
+	}{}
+	var inputs []string
+	for _, input := range config.Inputs {
+		if input.Enabled() {
+			input.Unpack(&t)
+			inputs = append(inputs, t.Type)
+		}
+	}
+	sort.Strings(inputs)
+	return inputs
+}
+
+// IsInputEnabled returns true if an input of the given type is enabled.
+func (config *Config) IsInputEnabled(name string) bool {
+	enabledInputs := config.ListEnabledInputs()
+	for _, input := range enabledInputs {
+		if name == input {
+			return true
+		}
+	}
+	return false
+}
Some files were not shown because too many files have changed in this diff.