Update to libbeat 6.1

This commit is contained in:
Blerim Sheqa 2017-12-19 13:16:39 +01:00
parent 1b7f8a5bb5
commit dd3104e1c6
5299 changed files with 593047 additions and 543678 deletions

View File

@ -20,7 +20,7 @@ environment:
# Custom clone folder (variables are not expanded here).
clone_folder: c:\gopath\src\github.com\elastic\beats
# Cache mingw install until appveyor.yml is modified.
# Cache files until appveyor.yml is modified.
cache:
- C:\ProgramData\chocolatey\bin -> .appveyor.yml
- C:\ProgramData\chocolatey\lib -> .appveyor.yml
@ -42,7 +42,7 @@ install:
cinst mingw > mingw-install.txt
Push-AppveyorArtifact mingw-install.txt
}
- set PATH=C:\tools\mingw64\bin;%GOROOT%\bin;%PATH%
- set PATH=C:\tools\mingw64\bin;%PATH%
- set PATH=%GOPATH%\bin;%PATH%
- go install github.com/elastic/beats/vendor/github.com/pierrre/gotestcover
- go version
@ -55,7 +55,7 @@ install:
- set PYTHONPATH=C:\Python27
- set PATH=%PYTHONPATH%;%PYTHONPATH%\Scripts;%PATH%
- python --version
- pip install jinja2 nose nose-timer PyYAML redis elasticsearch
- pip install six jinja2 nose nose-timer PyYAML redis elasticsearch
- easy_install C:/pywin_inst.exe
# To run your custom scripts instead of automatic MSBuild

View File

@ -7,6 +7,10 @@ end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
[*.go]
indent_size = 4
indent_style = tab
[*.json]
indent_size = 4
indent_style = space

View File

@ -1,19 +1,21 @@
# Directories
/.vagrant
/.idea
/.vscode
/build
/*/data
/*/logs
/*/_meta/kibana/5.x/index-pattern
/*/_meta/kibana/default/index-pattern
/*/fields.yml
/*/*.template*.json
# Files
.DS_Store
/glide.lock
/beats.iml
*.dev.yml
*.generated.yml
coverage.out
.python-version
beat.db
# Editor swap files
*.swp

View File

@ -1 +1 @@
1.7.6
1.9.2

13
vendor/github.com/elastic/beats/.pylintrc generated vendored Normal file
View File

@ -0,0 +1,13 @@
[MESSAGES CONTROL]
disable=too-many-lines,too-many-public-methods,too-many-statements
[BASIC]
method-rgx=[a-z_][a-z0-9_]{2,50}$
[FORMAT]
max-line-length=120

View File

@ -12,82 +12,118 @@ env:
global:
# Cross-compile for amd64 only to speed up testing.
- GOX_FLAGS="-arch amd64"
- DOCKER_COMPOSE_VERSION=1.9.0
- DOCKER_COMPOSE_VERSION=1.11.1
- GO_VERSION="$(cat .go-version)"
- TRAVIS_ETCD_VERSION=v3.2.8
matrix:
jobs:
include:
# General checks
- os: linux
env: TARGETS="check"
go: $GO_VERSION
stage: check
# Filebeat
- os: linux
env: TARGETS="-C filebeat testsuite"
go: $GO_VERSION
stage: test
- os: osx
env: TARGETS="TEST_ENVIRONMENT=0 -C filebeat testsuite"
go: $GO_VERSION
stage: test
# Heartbeat
- os: linux
env: TARGETS="-C heartbeat testsuite"
go: $GO_VERSION
stage: test
- os: osx
env: TARGETS="TEST_ENVIRONMENT=0 -C heartbeat testsuite"
go: $GO_VERSION
stage: test
# Auditbeat
- os: linux
env: TARGETS="-C auditbeat testsuite"
go: $GO_VERSION
stage: test
# Libbeat
- os: linux
env: TARGETS="-C libbeat testsuite"
go: $GO_VERSION
stage: test
- os: linux
env: TARGETS="-C libbeat crosscompile"
go: $GO_VERSION
stage: test
# Metricbeat
- os: linux
env: TARGETS="-C metricbeat testsuite"
go: $GO_VERSION
stage: test
- os: osx
env: TARGETS="TEST_ENVIRONMENT=0 -C metricbeat testsuite"
go: $GO_VERSION
stage: test
- os: linux
env: TARGETS="-C metricbeat crosscompile"
go: $GO_VERSION
stage: test
# Packetbeat
- os: linux
env: TARGETS="-C packetbeat testsuite"
go: $GO_VERSION
stage: test
# Winlogbeat
- os: linux
env: TARGETS="-C winlogbeat crosscompile"
go: $GO_VERSION
# Dashboards
- os: linux
env: TARGETS="-C libbeat/dashboards"
go: $GO_VERSION
stage: test
# Generators
- os: linux
env: TARGETS="-C generator/metricbeat test"
go: $GO_VERSION
stage: test
- os: linux
env: TARGETS="-C generator/beat test"
go: $GO_VERSION
stage: test
# Kubernetes
- os: linux
install: deploy/kubernetes/.travis/setup.sh
env:
- TARGETS="-C deploy/kubernetes test"
- TRAVIS_KUBE_VERSION=v1.6.11
stage: test
- os: linux
install: deploy/kubernetes/.travis/setup.sh
env:
- TARGETS="-C deploy/kubernetes test"
- TRAVIS_KUBE_VERSION=v1.7.7
stage: test
- os: linux
install: deploy/kubernetes/.travis/setup.sh
env:
- TARGETS="-C deploy/kubernetes test"
- TRAVIS_KUBE_VERSION=v1.8.0
stage: test
addons:
apt:
packages:
- python-virtualenv
- libpcap-dev
- geoip-database
before_install:
- python --version
- umask 022
- chmod -R go-w $GOPATH/src/github.com/elastic/beats
# Docker-compose installation
@ -112,6 +148,7 @@ notifications:
after_success:
# Copy full.cov to coverage.txt because codecov.io requires this file
- test -f auditbeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f auditbeat/build/coverage/full.cov
- test -f filebeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f filebeat/build/coverage/full.cov
- test -f heartbeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f heartbeat/build/coverage/full.cov
- test -f libbeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f libbeat/build/coverage/full.cov

View File

@ -8,12 +8,14 @@
// Template, add newest changes here
=== Beats version HEAD
https://github.com/elastic/beats/compare/v5.6.5...5.6[Check the HEAD diff]
https://github.com/elastic/beats/compare/v6.1.1...6.1[Check the HEAD diff]
==== Breaking changes
*Affecting all Beats*
*Auditbeat*
*Filebeat*
*Heartbeat*
@ -28,6 +30,8 @@ https://github.com/elastic/beats/compare/v5.6.5...5.6[Check the HEAD diff]
*Affecting all Beats*
*Auditbeat*
*Filebeat*
*Heartbeat*
@ -42,6 +46,8 @@ https://github.com/elastic/beats/compare/v5.6.5...5.6[Check the HEAD diff]
*Affecting all Beats*
*Auditbeat*
*Filebeat*
*Heartbeat*
@ -68,61 +74,671 @@ https://github.com/elastic/beats/compare/v5.6.5...5.6[Check the HEAD diff]
==== Known Issue
*Affecting all Beats*
*Filebeat*
*Heartbeat*
*Metricbeat*
*Packetbeat*
*Winlogbeat*
////////////////////////////////////////////////////////////
[[release-notes-5.6.5]]
=== Beats version 5.6.5
https://github.com/elastic/beats/compare/v5.6.4...v5.6.5[View commits]
[[release-notes-6.1.1]]
=== Beats version 6.1.1
https://github.com/elastic/beats/compare/v6.1.0...v6.1.1[View commits]
==== Bugfixes
No changes in this release.
*Affecting all Beats*
[[release-notes-6.1.0]]
=== Beats version 6.1.0
https://github.com/elastic/beats/compare/v6.0.1...v6.1.0[View commits]
- Fix duplicate batches of events in retry queue. {pull}5520[5520]
==== Breaking changes
*Auditbeat*
- Changed `audit.file.path` to be a multi-field so that path is searchable. {pull}5625[5625]
*Metricbeat*
- Clarify meaning of percentages reported by system core metricset. {pull}5565[5565]
- Fix map overwrite in docker diskio module. {issue}5582[5582]
[[release-notes-5.6.4]]
=== Beats version 5.6.4
https://github.com/elastic/beats/compare/v5.6.3...v5.6.4[View commits]
- Rename `heap_init` field to `heap.init` in the Elasticsearch module. {pull}5320[5320]
- Rename `http.response.status_code` field to `http.response.code` in the HTTP module. {pull}5521[5521]
==== Bugfixes
*Affecting all Beats*
- Fix race condition in internal logging rotator. {pull}4519[4519]
- Remove ID() from Runner interface {issue}5153[5153]
- Correctly send configured `Host` header to the remote server. {issue}4842[4842]
- Change add_kubernetes_metadata to attempt detection of namespace. {pull}5482[5482]
- Avoid double slash when join url and path {pull}5517[5517]
- Fix console color output for Windows. {issue}5611[5611]
- Fix logstash output debug message. {pull}5799[5799]
- Fix isolation of modules when merging local and global field settings. {issue}5795[5795]
*Filebeat*
- Add support for adding string tags {pull}5395[5395]
- Fix race condition when limiting the number of harvesters running in parallel {issue}5458[5458]
- Fix relative paths in the prospector definitions. {pull}5443[5443]
- Fix `recursive_glob.enabled` option. {pull}5443[5443]
*Metricbeat*
- Change field type of http header from nested to object {pull}5258[5258]
- Fix the fetching of process information when some data is missing under MacOS X. {issue}5337[5337]
- Change `MySQL active connections` visualization title to `MySQL total connections`. {issue}4812[4812]
- Fix `ProcState` on Linux and FreeBSD when process names contain parentheses. {pull}5775[5775]
- Fix incorrect `Mem.Used` calculation under linux. {pull}5775[5775]
*Packetbeat*
- Fix missing length check in the PostgreSQL module. {pull}5457[5457]
- Fix http status phrase parsing not allow spaces. {pull}5312[5312]
- Fix http parse to allow to parse get request with space in the URI. {pull}5495[5495]
*Winlogbeat*
- Fix the registry file. It was not correctly storing event log names, and
upon restart it would begin reading at the start of each event log. {issue}5813[5813]
==== Added
*Affecting all Beats*
- Support dashboard loading without Elasticsearch {pull}5653[5653]
- Changed the hashbang used in the beat helper script from `/bin/bash` to `/usr/bin/env bash`. {pull}5051[5051]
- Changed beat helper script to use `exec` when running the beat. {pull}5051[5051]
- Fix reloader error message to only print on actual error {pull}5066[5066]
- Add support for enabling TLS renegotiation. {issue}4386[4386]
- Add setting to enable/disable the slow start in logstash output. {pull}5400[5400]
- Add Azure VM support for add_cloud_metadata processor {pull}5355[5355]
- Add `output.file.permission` config option. {pull}4638[4638]
- Refactor add_kubernetes_metadata to support autodiscovery {pull}5434[5434]
- Improve custom flag handling and CLI flags usage message. {pull}5543[5543]
- Add number_of_routing_shards config set to 30 {pull}5570[5570]
- Set log level for kafka output. {pull}5397[5397]
- Move TCP UDP start up into `server.Start()` {pull}4903[4903]
- Update to Golang 1.9.2
[[release-notes-5.6.3]]
=== Beats version 5.6.3
https://github.com/elastic/beats/compare/v5.6.2...v5.6.3[View commits]
*Auditbeat*
No changes in this release.
- Add support for SHA3 hash algorithms to the file integrity module. {issue}5345[5345]
- Add dashboards for Linux audit framework events (overview, executions, sockets). {pull}5516[5516]
*Filebeat*
- Add PostgreSQL module with slowlog support. {pull}4763[4763]
- Add Kafka log module. {pull}4885[4885]
- Add support for `/var/log/containers/` log path in `add_kubernetes_metadata` processor. {pull}4981[4981]
- Remove error log from runnerfactory as error is returned by API. {pull}5085[5085]
- Add experimental Docker `json-file` prospector. {pull}5402[5402]
- Add experimental Docker autodiscover functionality. {pull}5245[5245]
- Add option to convert the timestamps to UTC in the system module. {pull}5647[5647]
- Add Logstash module support for main log and the slow log, support the plain text or structured JSON format {pull}5481[5481]
*Metricbeat*
- Add graphite protocol metricbeat module. {pull}4734[4734]
- Add http server metricset to support push metrics via http. {pull}4770[4770]
- Make config object public for graphite and http server {pull}4820[4820]
- Add system uptime metricset. {issue}4848[4848]
- Add experimental `queue` metricset to RabbitMQ module. {pull}4788[4788]
- Add additional php-fpm pool status kpis for Metricbeat module {pull}5287[5287]
- Add etcd module. {issue}4970[4970]
- Add ip address of docker containers to event. {pull}5379[5379]
- Add ceph osd tree information to Metricbeat {pull}5498[5498]
- Add basic Logstash module. {pull}5540[5540]
- Add dashboard for Windows service metricset. {pull}5603[5603]
- Add experimental Docker autodiscover functionality. {pull}5245[5245]
- Add Windows service metricset in the windows module. {pull}5332[5332]
- Update gosigar to v0.6.0. {pull}5775[5775]
*Packetbeat*
- Add support for decoding the TLS envelopes. {pull}5476[5476]
[[release-notes-6.0.1]]
=== Beats version 6.0.1
https://github.com/elastic/beats/compare/v6.0.0...v6.0.1[View commits]
==== Bugfixes
*Affecting all Beats*
- Fix documentation links in README.md files. {pull}5710[5710]
- Fix `add_docker_metadata` dropping some containers. {pull}5788[5788]
*Heartbeat*
- Fix the "HTTP up status" visualization. {pull}5564[5564]
*Metricbeat*
- Fix map overwrite in docker diskio module. {issue}5582[5582]
- Fix connection leak in mongodb module. {issue}5688[5688]
- Fix the include top N processes feature for cases where there are fewer
processes than N. {pull}5729[5729]
include::libbeat/docs/release-notes/6.0.0.asciidoc[]
[[release-notes-6.0.0-ga]]
=== Beats version 6.0.0-GA
https://github.com/elastic/beats/compare/v6.0.0-rc2...v6.0.0[View commits]
The list below covers the changes between 6.0.0-rc2 and 6.0.0 GA only.
==== Bugfixes
*Filebeat*
- Fix machine learning jobs setup for dynamic modules. {pull}5509[5509]
*Packetbeat*
- Fix missing length check in the PostgreSQL module. {pull}5457[5457]
- Fix panic in ACK handler if event is dropped on blocked queue {issue}5524[5524]
==== Added
*Filebeat*
- Add Kubernetes manifests to deploy Filebeat. {pull}5349[5349]
*Metricbeat*
- Add Kubernetes manifests to deploy Metricbeat. {pull}5349[5349]
[[release-notes-6.0.0-rc2]]
=== Beats version 6.0.0-rc2
https://github.com/elastic/beats/compare/v6.0.0-rc1...v6.0.0-rc2[View commits]
==== Breaking changes
*Packetbeat*
- Remove not-working `runoptions.uid` and `runoptions.gid` options in Packetbeat. {pull}5261[5261]
==== Bugfixes
*Affecting all Beats*
- Fix data race accessing watched containers. {issue}5147[5147]
- Do not require template if index change and template disabled {pull}5319[5319]
- Fix missing ACK in redis output. {issue}5404[5404]
*Filebeat*
- Fix default paths for redis 4.0.1 logs on macOS {pull}5173[5173]
- Fix Filebeat not starting if command line and modules configs are used together. {issue}5376[5376]
- Fix double `@timestamp` field when JSON decoding was used. {pull}5436[5436]
*Metricbeat*
- Use `beat.name` instead of `beat.hostname` in the Host Overview dashboard. {pull}5340[5340]
- Fix the loading of 5.x dashboards. {issue}5277[5277]
==== Added
*Metricbeat*
- Auto-select a hostname (based on the host on which the Beat is running) in the Host Overview dashboard. {pull}5340[5340]
==== Deprecated
*Filebeat*
- The `filebeat.config_dir` option is deprecated. Use `filebeat.config.prospector` options instead. {pull}5321[5321]
[[release-notes-6.0.0-rc1]]
=== Beats version 6.0.0-rc1
https://github.com/elastic/beats/compare/v6.0.0-beta2...v6.0.0-rc1[View commits]
==== Bugfixes
*Affecting all Beats*
- Fix the `/usr/bin/beatname` script to accept `-d "*"` as a parameter. {issue}5040[5040]
- Combine `fields.yml` properties when they are defined in different sources. {issue}5075[5075]
- Keep Docker & Kubernetes pod metadata after container dies while they are needed by processors. {pull}5084[5084]
- Fix `fields.yml` lookup when using `export template` with a custom `path.config` param. {issue}5089[5089]
- Remove runner creation from every reload check {pull}5141[5141]
- Fix add_kubernetes_metadata matcher registry lookup. {pull}5159[5159]
*Metricbeat*
- Fix a memory allocation issue where more memory was allocated than needed in the windows-perfmon metricset. {issue}5035[5035]
- Don't start metricbeat if external modules config is wrong and reload is disabled {pull}5053[5053]
- The MongoDB module now connects on each fetch, to avoid stopping the whole Metricbeat instance if MongoDB is not up when starting. {pull}5120[5120]
- Fix kubernetes events module to be able to index time fields properly. {issue}5093[5093]
- Fixed `cmd_set` and `cmd_get` being mixed in the Memcache module. {pull}5189[5189]
==== Added
*Affecting all Beats*
- Enable flush timeout by default. {pull}5150[5150]
- Add @metadata.version to events send to Logstash. {pull}5166[5166]
*Auditbeat*
- Changed the number of shards in the default configuration to 3. {issue}5095[5095]
- Add support for receiving audit events using a multicast socket. {issue}4850[4850]
*Filebeat*
- Changed the number of shards in the default configuration to 3. {issue}5095[5095]
- Don't start filebeat if external modules/prospectors config is wrong and reload is disabled {pull}5053[5053]
- Add `filebeat.registry_flush` setting, to delay the registry updates. {pull}5146[5146]
*Heartbeat*
- Changed the number of shards in the default configuration to 1. {issue}5095[5095]
*Packetbeat*
- Changed the number of shards in the default configuration to 3. {issue}5095[5095]
*Winlogbeat*
- Changed the number of shards in the default configuration to 3. {issue}5095[5095]
[[release-notes-6.0.0-beta2]]
=== Beats version 6.0.0-beta2
https://github.com/elastic/beats/compare/v6.0.0-beta1...v6.0.0-beta2[View commits]
==== Breaking changes
*Affecting all Beats*
- The log directory (`path.log`) for Windows services is now set to `C:\ProgramData\[beatname]\logs`. {issue}4764[4764]
- The _all field is disabled in Elasticsearch 6.0. This means that searching by individual
words only works on text fields. {issue}4901[4901]
- Fail if removed setting output.X.flush_interval is explicitly configured.
- Rename the `/usr/bin/beatname.sh` script (e.g. `metricbeat.sh`) to `/usr/bin/beatname`. {pull}4933[4933]
- Beat does not start if elasticsearch index pattern was modified but not the template name and pattern. {issue}4769[4769]
- Fail if removed setting output.X.flush_interval is explicitly configured. {pull}4880[4880]
==== Bugfixes
*Affecting all Beats*
- Register kubernetes `field_format` matcher and remove logger in `Encode` API {pull}4888[4888]
- Fix go plugins not loaded when beat starts {pull}4799[4799]
- Add support for `initContainers` in `add_kubernetes_metadata` processor. {issue}4825[4825]
- Eliminate deprecated _default_ mapping in 6.x {pull}4864[4864]
- Fix pod name indexer to use both namespace, pod name to frame index key {pull}4775[4775]
*Filebeat*
- Fix issue where the `fileset.module` could have the wrong value. {issue}4761[4761]
*Heartbeat*
- Fix monitor.name being empty by default. {issue}4852[4852]
- Fix wrong event timestamps. {issue}4851[4851]
*Metricbeat*
- Added missing mongodb configuration file to the `modules.d` folder. {pull}4870[4870]
- Fix wrong MySQL CRUD queries timelion visualization {pull}4857[4857]
- Add new metrics to CPU metricsset {pull}4969[4969]
*Packetbeat*
- Update flow timestamp on each packet being received. {issue}4895[4895]
==== Added
*Affecting all Beats*
- Add setting to enable/disable the slow start in logstash output. {pull}4972[4972]
- Update init scripts to use the `test config` subcommand instead of the deprecated `-configtest` flag. {issue}4600[4600]
- Get by default the credentials for connecting to Kibana from the Elasticsearch output configuration. {pull}4867[4867]
- Added `cloud.id` and `cloud.auth` settings, for simplifying using Beats with the Elastic Cloud. {issue}4959[4959]
- Add lz4 compression support to kafka output. {pull}4977[4977]
- Add newer kafka versions to kafka output. {pull}4977[4977]
- Configure the index name when loading the dashboards and the index pattern. {pull}4949[4949]
*Metricbeat*
- Add `filesystem.ignore_types` to system module for ignoring filesystem types. {issue}4685[4685]
- Add support to exclude labels from kubernetes pod metadata. {pull}4757[4757]
[[release-notes-6.0.0-beta1]]
=== Beats version 6.0.0-beta1
https://github.com/elastic/beats/compare/v6.0.0-alpha2...v6.0.0-beta1[View commits]
==== Breaking changes
*Affecting all Beats*
- Rename `kubernetes` processor to `add_kubernetes_metadata`. {pull}4473[4473]
- Rename `*.full.yml` config files to `*.reference.yml`. {pull}4563[4563]
- The `scripts/import_dashboards` is removed from packages. Use the `setup` command instead. {pull}4586[4586]
- Change format of the saved kibana dashboards to have a single JSON file for each dashboard {pull}4413[4413]
- Rename `configtest` command to `test config`. {pull}4590[4590]
- Remove setting `queue_size` and `bulk_queue_size`. {pull}4650[4650]
- Remove setting `dashboard.snapshot` and `dashboard.snapshot_url`. They are no longer needed because the
dashboards are included in the packages by default. {pull}4675[4675]
- Beats can no longer be launched from Windows Explorer (GUI), command line is required. {pull}4420[4420]
*Auditbeat*
- Changed file metricset config to make `file.paths` a list instead of a dictionary. {pull}4796[4796]
*Heartbeat*
- Renamed the heartbeat RPM/DEB name to `heartbeat-elastic`. {pull}4601[4601]
*Metricbeat*
- Change all `system.cpu.*.pct` metrics to be scaled by the number of CPU cores.
This will make the CPU usage percentages from the system cpu metricset consistent
with the system process metricset. The documentation for these metrics already
stated that on multi-core systems the percentages could be greater than 100%. {pull}4544[4544]
- Remove filters setting from metricbeat modules. {pull}4699[4699]
- Added `type` field to filesystem metrics. {pull}4717[4717]
*Packetbeat*
- Remove the already unsupported `pf_ring` sniffer option. {pull}4608[4608]
==== Bugfixes
*Affecting all Beats*
- Don't stop with error loading the ES template if the ES output is not enabled. {pull}4436[4436]
- Fix race condition in internal logging rotator. {pull}4519[4519]
- Normalize all times to UTC to ensure proper index naming. {issue}4569[4569]
- Fix issue with loading dashboards to ES 6.0 when .kibana index did not already exist. {issue}4659[4659]
*Auditbeat*
- Fix `file.max_file_size` config option for the audit file metricset. {pull}4796[4796]
*Filebeat*
- Fix issue where the `fileset.module` could have the wrong value. {issue}4761[4761]
*Metricbeat*
- Fix issue affecting Windows services timing out at startup. {pull}4491[4491]
- Fix incorrect docker.diskio.total metric calculation. {pull}4507[4507]
- Vsphere module: used memory field corrected. {issue}4461[4461]
*Packetbeat*
- Enabled /proc/net/tcp6 scanning and fixed ip v6 parsing. {pull}4442[4442]
*Winlogbeat*
- Removed validation of top-level config keys. This behavior was inconsistent with other Beats
and caused maintainability issues. {pull}4657[4657]
==== Added
*Affecting all Beats*
- New cli subcommands interface. {pull}4420[4420]
- Allow source path matching in `add_docker_metadata` processor. {pull}4495[4495]
- Add support for analyzers and multifields in fields.yml. {pull}4574[4574]
- Add support for JSON logging. {pull}4523[4523]
- Add `test output` command, to test Elasticsearch and Logstash output settings. {pull}4590[4590]
- Introduce configurable event queue settings: queue.mem.events, queue.mem.flush.min_events and queue.mem.flush.timeout. {pull}4650[4650]
- Enable pipelining in Logstash output by default. {pull}4650[4650]
- Added 'result' field to Elasticsearch QueryResult struct for compatibility with 6.x Index and Delete API responses. {issue}4661[4661]
- The sample dashboards are now included in the Beats packages. {pull}4675[4675]
- Add `pattern` option to be used in the fields.yml to specify the pattern for a number field. {pull}4731[4731]
*Auditbeat*
- Added `file.hash_types` config option for controlling the hash types. {pull}4796[4796]
- Added the ability to specify byte unit suffixes to `file.max_file_size`. {pull}4796[4796]
*Filebeat*
- Add experimental Redis module. {pull}4441[4441]
- Nginx module: use the first not-private IP address as the remote_ip. {pull}4417[4417]
- Load Ingest Node pipelines when the Elasticsearch connection is established, instead of only once at startup. {pull}4479[4479]
- Add support for loading Xpack Machine Learning configurations from the modules, and added sample configurations for the Nginx module. {pull}4506[4506] {pull}4609[4609]
- Add udp prospector type. {pull}4452[4452]
- Enabled Cgo which means libc is dynamically compiled. {pull}4546[4546]
- Add Beta module config reloading mechanism {pull}4566[4566]
- Remove spooler and publisher components and settings. {pull}4644[4644]
*Heartbeat*
- Enabled Cgo which means libc is dynamically compiled. {pull}4546[4546]
*Metricbeat*
- Add random startup delay to each metricset to avoid the thundering herd problem. {issue}4010[4010]
- Add the ability to configure audit rules to the kernel module. {pull}4482[4482]
- Add the ability to configure kernel's audit failure mode. {pull}4516[4516]
- Add experimental Aerospike module. {pull}4560[4560]
- Vsphere module: collect custom fields from virtual machines. {issue}4464[4464]
- Add `test modules` command, to test modules expected output. {pull}4656[4656]
- Add `processors` setting to metricbeat modules. {pull}4699[4699]
- Support `npipe` protocol (Windows) in Docker module. {pull}4751[4751]
*Winlogbeat*
- Add the ability to use LevelRaw if Level isn't populated in the event XML. {pull}4257[4257]
*Auditbeat*
- Add file integrity metricset to the audit module. {pull}4486[4486]
[[release-notes-6.0.0-alpha2]]
=== Beats version 6.0.0-alpha2
https://github.com/elastic/beats/compare/v6.0.0-alpha1...v6.0.0-alpha2[View commits]
==== Breaking changes
*Filebeat*
- Rename `input_type` field to `prospector.type` {pull}4294[4294]
- The `@metadata.type` field, added by the Logstash output, is now hardcoded to `doc` and will be removed in future versions. {pull}4331[4331].
==== Bugfixes
*Affecting all Beats*
- Fix importing the dashboards when the limit for max open files is too low. {issue}4244[4244]
- Fix configuration documentation for kubernetes processor {pull}4313[4313]
- Fix misspelling in `add_locale` configuration option for abbreviation.
*Filebeat*
- Fix race condition on harvester stopping with reloading enabled. {issue}3779[3779]
- Fix recursive glob config parsing and resolution across restarts. {pull}4269[4269]
- Allow string characters in user agent patch version (NGINX and Apache) {pull}4415[4415]
- Fix grok pattern in filebeat module system/auth without hostname. {pull}4224[4224]
*Metricbeat*
- Set correct format for percent fields in memory module. {pull}4619[4619]
- Fix a debug statement that said a module wrapper had stopped when it hadn't. {pull}4264[4264]
- Use MemAvailable value from /proc/meminfo on Linux 3.14. {pull}4316[4316]
- Fix panic when events were dropped by filters. {issue}4327[4327]
- Add filtering to system filesystem metricset to remove relative mountpoints like those
from Linux network namespaces. {pull}4370[4370]
- Remove unnecessary print statement in schema apis. {pull}4355[4355]
- Fix type of field `haproxy.stat.check.health.last`. {issue}4407[4407]
*Packetbeat*
- Enable memcache filtering only if a port is specified in the config file. {issue}4335[4335]
==== Added
*Affecting all Beats*
- Upgraded to Golang 1.8.3. {pull}4401[4401]
- Added the possibility to set Elasticsearch mapping template settings from the Beat configuration file. {pull}4284[4284] {pull}4317[4317]
- Add a variable to the SysV init scripts to make it easier to change the user. {pull}4340[4340]
- Add the option to write the generated Elasticsearch mapping template into a file. {pull}4323[4323]
- Add `instance_name` in GCE add_cloud_metadata processor. {pull}4414[4414]
- Add `add_docker_metadata` processor. {pull}4352[4352]
- Add `logging.files` `permissions` option. {pull}4295[4295]
*Filebeat*
- Added ability to sort harvested files. {pull}4374[4374]
- Add experimental Redis slow log prospector type. {pull}4180[4180]
*Metricbeat*
- Add macOS implementation of the system diskio metricset. {issue}4144[4144]
- Add process_summary metricset that records high level metrics about processes. {pull}4231[4231]
- Add `kube-state-metrics` based metrics to `kubernetes` module {pull}4253[4253]
- Add debug logging to Jolokia JMX metricset. {pull}4341[4341]
- Add events metricset for kubernetes metricbeat module {pull}4315[4315]
- Change Metricbeat default configuration file to be better optimized for most users. {pull}4329[4329]
- Add experimental RabbitMQ module. {pull}4394[4394]
- Add Kibana dashboard for the Kubernetes modules. {pull}4138[4138]
*Packetbeat*
*Winlogbeat*
==== Deprecated
*Affecting all Beats*
- The `@metadata.type` field, added by the Logstash output, is deprecated, hardcoded to `doc` and will be removed in future versions. {pull}4331[4331].
*Filebeat*
- Deprecate `input_type` prospector config. Use `type` config option instead. {pull}4294[4294]
==== Known Issue
- If the Elasticsearch output is not enabled, but `setup.template` options are
present (like it's the case in the default Metricbeat configuration), the
Beat stops with an error: "Template loading requested but the Elasticsearch
output is not configured/enabled". To avoid this error, disable the template
loading explicitly `setup.template.enabled: false`.
[[release-notes-6.0.0-alpha1]]
=== Beats version 6.0.0-alpha1
https://github.com/elastic/beats/compare/v5.4.0...v6.0.0-alpha1[View commits]
==== Breaking changes
*Affecting all Beats*
- Introduce beat version in the Elasticsearch index and mapping template {pull}3527[3527]
- Usage of field `_type` is now ignored and hardcoded to `doc`. {pull}3757[3757]
- Change vendor manager from glide to govendor. {pull}3851[3851]
- Rename `error` field to `error.message`. {pull}3987[3987]
- Change `dashboards.*` config options to `setup.dashboards.*`. {pull}3921[3921]
- Change `outputs.elasticsearch.template.*` to `setup.template.*` {pull}4080[4080]
*Filebeat*
- Remove code to convert states from 1.x. {pull}3767[3767]
- Remove deprecated config options `force_close_files` and `close_older`. {pull}3768[3768]
- Change `clean_removed` behaviour to also remove states for files which cannot be found anymore under the same name. {pull}3827[3827]
- Remove `document_type` config option. Use `fields` instead. {pull}4204[4204]
- Move `json_error` under `error.message` and `error.key`. {pull}4167[4167]
*Packetbeat*
- Remove deprecated `geoip`. {pull}3766[3766]
- Replace `waitstop` command line argument by `shutdown_timeout` in configuration file. {pull}3588[3588]
*Winlogbeat*
- Remove metrics endpoint. Replaced by http endpoint in libbeat (see #3717). {pull}3901[3901]
==== Bugfixes
*Affecting all Beats*
- Add `_id`, `_type`, `_index` and `_score` fields in the generated index pattern. {pull}3282[3282]
*Filebeat*
- Fix the Mysql slowlog parsing of IP addresses. {pull}4183[4183]
- Fix issue that new prospector was not reloaded on conflict {pull}4128[4128]
*Heartbeat*
- Use IP type of elasticsearch for ip field. {pull}3926[3926]
*Metricbeat*
- Support `common.Time` in `mapstriface.toTime()` {pull}3812[3812]
- Fix MongoDB `dbstats` fields mapping. {pull}4025[4025]
- Fixing prometheus collector to aggregate metrics based on metric family. {pull}4075[4075]
- Fixing multiEventFetch error reporting when no events are returned {pull}4153[4153]
==== Added
*Affecting all Beats*
- Initialize a beats UUID from file on startup. {pull}3615[3615]
- Add new `add_locale` processor to export the local timezone with an event. {pull}3902[3902]
- Add http endpoint. {pull}3717[3717]
- Updated to Go 1.8.1. {pull}4033[4033]
- Add kubernetes processor {pull}3888[3888]
- Add support for `include_labels` and `include_annotations` in kubernetes processor {pull}4043[4043]
- Support new `index_patterns` field when loading templates for Elasticsearch >= 6.0 {pull}4056[4056]
- Adding goimports support to make check and fmt {pull}4114[4114]
- Make kubernetes indexers/matchers pluggable {pull}4151[4151]
- Abstracting pod interface in kubernetes plugin to enable easier vendoring {pull}4152[4152]
*Filebeat*
- Restructure `input.Event` to be inline with `outputs.Data` {pull}3823[3823]
- Add base for supporting prospector level processors {pull}3853[3853]
- Add `filebeat.config.path` as replacement for `config_dir`. {pull}4051[4051]
- Add a `recursive_glob.enabled` setting to expand `**` in patterns. {pull}3980[3980]
- Add Icinga module. {pull}3904[3904]
- Add ability to parse nginx logs exposing the X-Forwarded-For header instead of the remote address.
*Heartbeat*
- Event format and field naming changes in Heartbeat and sample Dashboard. {pull}4091[4091]
*Metricbeat*
- Add experimental metricset `perfmon` to Windows module. {pull}3758[3758]
- Add memcached module with stats metricset. {pull}3693[3693]
- Add the `process.cmdline.cache.enabled` config option to the System Process Metricset. {pull}3891[3891]
- Add new MetricSet interfaces for developers (`Closer`, `ReportingFetcher`, and `PushMetricSet`). {pull}3908[3908]
- Add kubelet module {pull}3916[3916]
- Add dropwizard module {pull}4022[4022]
- Adding query APIs for metricsets and modules from metricbeat registry {pull}4102[4102]
- Fixing nil pointer on prometheus collector when http response is nil {pull}4119[4119]
- Add http module with json metricset. {pull}4092[4092]
- Add the option to the system module to include only the first top N processes by CPU and memory. {pull}4127[4127].
- Add experimental Vsphere module. {pull}4028[4028]
- Add experimental Elasticsearch module. {pull}3903[3903]
- Add experimental Kibana module. {pull}3895[3895]
- Move elasticsearch metricset node_stats under node.stats namespace. {pull}4142[4142]
- Make IP port indexer constructor public {pull}4434[4434]
*Packetbeat*
- Add `fields` and `fields_under_root` to Packetbeat protocols configurations. {pull}3518[3518]
- Add list style Packetbeat protocols configurations. This change supports specifying multiple configurations of the same protocol analyzer. {pull}3518[3518]
*Winlogbeat*
==== Deprecated
*Affecting all Beats*
- Usage of field `_type` is deprecated. It should not be used in queries or dashboards. {pull}3409[3409]
*Packetbeat*
- Deprecate dictionary style protocols configuration. {pull}3518[3518]
*Winlogbeat*
==== Known Issue
*Filebeat*
- Prospector reloading only works properly with new files. {pull}3546[3546]
[[release-notes-5.6.2]]
=== Beats version 5.6.2
@ -170,7 +786,6 @@ https://github.com/elastic/beats/compare/v5.5.3...v5.6.0[View commits]
*Affecting all Beats*
- Add option to the import_dashboards script to load the dashboards via Kibana API. {pull}4682[4682]
- Add `logging.files` `permissions` option. {pull}4295[4295]
*Filebeat*
@ -198,7 +813,6 @@ No changes in this release.
https://github.com/elastic/beats/compare/v5.5.1...v5.5.2[View commits]
No changes in this release.
[[release-notes-5.5.1]]
=== Beats version 5.5.1
https://github.com/elastic/beats/compare/v5.5.0...v5.5.1[View commits]
@ -303,7 +917,7 @@ https://github.com/elastic/beats/compare/v5.4.0...v5.4.1[View commits]
[[release-notes-5.4.0]]
=== Beats version 5.4.0
https://github.com/elastic/beats/compare/v5.3.1...v5.4.0[View commits]
https://github.com/elastic/beats/compare/v5.3.2...v5.4.0[View commits]
==== Bugfixes
@ -318,7 +932,6 @@ https://github.com/elastic/beats/compare/v5.3.1...v5.4.0[View commits]
- Allow log lines without a program name in the Syslog fileset. {pull}3944[3944]
- Don't stop Filebeat when modules are used with the Logstash output. {pull}3929[3929]
- Properly shut down crawler in case one prospector is misconfigured. {pull}4037[4037]
*Metricbeat*
@ -357,6 +970,18 @@ https://github.com/elastic/beats/compare/v5.3.1...v5.4.0[View commits]
- Add DNS dashboard for an overview the DNS traffic. {pull}3883[3883]
- Add DNS Tunneling dashboard to highlight domains with large numbers of subdomains or high data volume. {pull}3884[3884]
[[release-notes-5.3.2]]
=== Beats version 5.3.2
https://github.com/elastic/beats/compare/v5.3.1...v5.3.2[View commits]
==== Bugfixes
*Filebeat*
- Properly shut down crawler in case one prospector is misconfigured. {pull}4037[4037]
- Fix panic in JSON decoding code if the input line is "null". {pull}4042[4042]
[[release-notes-5.3.1]]
=== Beats version 5.3.1
https://github.com/elastic/beats/compare/v5.3.0...v5.3.1[View commits]
@ -366,6 +991,7 @@ https://github.com/elastic/beats/compare/v5.3.0...v5.3.1[View commits]
*Affecting all Beats*
- Fix panic when testing regex-AST to match against date patterns. {issue}3889[3889]
- Fix panic due to race condition in kafka output. {pull}4098[4098]
*Filebeat*
@ -376,6 +1002,7 @@ https://github.com/elastic/beats/compare/v5.3.0...v5.3.1[View commits]
- Avoid errors when some Apache status fields are missing. {issue}3074[3074]
[[release-notes-5.3.0]]
=== Beats version 5.3.0
https://github.com/elastic/beats/compare/v5.2.2...v5.3.0[View commits]
@ -401,10 +1028,6 @@ https://github.com/elastic/beats/compare/v5.2.2...v5.3.0[View commits]
*Affecting all Beats*
- Add `_id`, `_type`, `_index` and `_score` fields in the generated index pattern. {pull}3282[3282]
- Fix potential elasticsearch output URL parsing error if protocol scheme is missing. {pull}3671[3671]
- Improve error message when downloading the dashboards fails. {pull}3805[3805]
- Downgrade Elasticsearch per batch item failure log to debug level. {issue}3953[3953]
- Fix panic due to race condition in kafka output. {pull}4098[4098]
*Filebeat*
- Always use absolute path for event and registry. {pull}3328[3328]
@ -416,12 +1039,6 @@ https://github.com/elastic/beats/compare/v5.2.2...v5.3.0[View commits]
- Add error handling to system process metricset for when Linux cgroups are missing from the kernel. {pull}3692[3692]
- Add labels to the Docker healthcheck metricset output. {pull}3707[3707]
- Make system process metricset honor the cpu_ticks config option. {issue}3590[3590]
- Support common.Time in mapstriface.toTime() {pull}3812[3812]
- Fixing panic on prometheus collector when label has , {pull}3947[3947]
- Fix MongoDB dbstats fields mapping. {pull}4025[4025]
*Packetbeat*
*Winlogbeat*
@ -447,7 +1064,11 @@ https://github.com/elastic/beats/compare/v5.2.2...v5.3.0[View commits]
- The `symlinks` and `harvester_limit` settings are now GA, instead of experimental. {pull}3525[3525]
- close_timeout is also applied when the output is blocking. {pull}3511[3511]
- Improve handling of different path variants on Windows. {pull}3781[3781]
- Add multiline.flush_pattern option, for specifying the 'end' of a multiline pattern {pull}4019[4019]
*Heartbeat*
- Add `tags`, `fields` and `fields_under_root` in monitors configuration. {pull}3623[3623]
*Metricbeat*
@ -473,6 +1094,7 @@ https://github.com/elastic/beats/compare/v5.2.2...v5.3.0[View commits]
- The experimental `publish_async` option is now deprecated and is planned to be removed in 6.0. {pull}3525[3525]
[[release-notes-5.2.2]]
=== Beats version 5.2.2
https://github.com/elastic/beats/compare/v5.2.1...v5.2.2[View commits]
@ -552,6 +1174,7 @@ https://github.com/elastic/beats/compare/v5.1.2...v5.2.0[View commits]
- Experimental Prometheus module. {pull}3202[3202]
- Add system socket module that reports all TCP sockets. {pull}3246[3246]
- Kafka consumer groups metricset. {pull}3240[3240]
- Add jolokia module with dynamic jmx metricset. {pull}3570[3570]
*Winlogbeat*
@ -609,6 +1232,8 @@ https://github.com/elastic/beats/compare/v5.0.2...v5.1.1[View commits]
- Add add_cloud_metadata processor for collecting cloud provider metadata. {pull}2728[2728]
- Added decode_json_fields processor for decoding fields containing JSON strings. {pull}2605[2605]
- Add Tencent Cloud provider for add_cloud_metadata processor. {pull}4023[4023]
- Add Alibaba Cloud provider for add_cloud_metadata processor. {pull}4111[4111]
*Metricbeat*

View File

@ -11,108 +11,7 @@ There are many ways to contribute, from writing tutorials or blog posts,
improving the documentation, submitting bug reports and feature requests or
writing code for implementing a whole new protocol.
If you have a bugfix or new feature that you would like to contribute, please
start by opening a topic on the [forums](https://discuss.elastic.co/c/beats).
It may be that somebody is already working on it, or that there are particular
issues that you should know about before implementing the change.
We enjoy working with contributors to get their code accepted. There are many
approaches to fixing a problem and it is important to find the best approach
before writing too much code.
The process for contributing to any of the Elastic repositories is similar.
## Contribution Steps
1. Please make sure you have signed our [Contributor License
Agreement](https://www.elastic.co/contributor-agreement/). We are not
asking you to assign copyright to us, but to give us the right to distribute
your code without restriction. We ask this of all contributors in order to
assure our users of the origin and continuing existence of the code. You
only need to sign the CLA once.
2. Send a pull request! Push your changes to your fork of the repository and
[submit a pull
request](https://help.github.com/articles/using-pull-requests). In the pull
request, describe what your changes do and mention any bugs/issues related
to the pull request.
## Adding a new Beat
If you want to create a new Beat, please read our [developer
guide](https://www.elastic.co/guide/en/beats/libbeat/current/new-beat.html).
You don't need to submit the code to this repository. Most new Beats start in
their own repository and just make use of the libbeat packages. After you have
a working Beat that you'd like to share with others, open a PR to add it to our
list of [community
Beats](https://github.com/elastic/beats/blob/master/libbeat/docs/communitybeats.asciidoc).
## Setting up your dev environment
The Beats are Go programs, so install the latest version of
[golang](http://golang.org/) if you don't have it already. The current Go version
used for development is Golang 1.7.6.
The location where you clone is important. Please clone under the source
directory of your `GOPATH`. If you don't have `GOPATH` already set, you can
simply set it to your home directory (`export GOPATH=$HOME`).
$ mkdir -p ${GOPATH}/src/github.com/elastic
$ cd ${GOPATH}/src/github.com/elastic
$ git clone https://github.com/elastic/beats.git
Note: If you have multiple go paths use `${GOPATH%%:*}` instead of `${GOPATH}`.
Then you can compile a particular Beat by using the Makefile. For example, for
Packetbeat:
$ cd beats/packetbeat
$ make
Some of the Beats might have extra development requirements, in which case you'll find a
CONTRIBUTING.md file in the Beat directory.
## Update scripts
The Beats use a variety of scripts based on Python to generate configuration files
and documentation. The command used for this is:
$ make update
This command has the following dependencies:
* Python >=2.7.9
* [virtualenv](https://virtualenv.pypa.io/en/latest/) for Python
Virtualenv can be installed with the command `easy_install virtualenv` or `pip install virtualenv`.
More details can be found [here](https://virtualenv.pypa.io/en/latest/installation.html).
## Testing
You can run the whole testsuite with the following command:
$ make testsuite
Running the testsuite has the following requirements:
* Python >=2.7.9
* Docker >=1.10.0
* Docker-compose >= 1.8.0
## Documentation
The documentation for each Beat is located under {beatname}/docs and is based on asciidoc. After changing the docs,
you should verify that the docs are still building to avoid breaking the automated docs build. To build the docs run
`make docs`. If you want to preview the docs for a specific Beat, run `make docs-preview`
inside the folder for the Beat. This will automatically open your browser with the docs for preview.
## Dependencies
To manage the `vendor/` folder we use
[glide](https://github.com/Masterminds/glide), which uses
[glide.yaml](glide.yaml) as a manifest file for the dependencies. Please see
the glide documentation on how to add or update vendored dependencies.
If you want to contribute to the Beats project, you can start by reading
the [contributing guidelines](https://www.elastic.co/guide/en/beats/devguide/current/beats-contributing.html)
in the _Beats Developer Guide_.

View File

@ -1,15 +0,0 @@
FROM golang:1.7.6
MAINTAINER Nicolas Ruflin <ruflin@elastic.co>
RUN set -x && \
apt-get update && \
apt-get install -y netcat && \
apt-get clean
COPY libbeat/scripts/docker-entrypoint.sh /entrypoint.sh
RUN mkdir -p /etc/pki/tls/certs
COPY testing/environments/docker/logstash/pki/tls/certs/logstash.crt /etc/pki/tls/certs/logstash.crt
# Create a copy of the repository inside the container.
COPY . /go/src/github.com/elastic/beats/

View File

@ -1,13 +0,0 @@
Copyright (c) 2012-2016 Elasticsearch <http://www.elastic.co>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

13
vendor/github.com/elastic/beats/LICENSE.txt generated vendored Normal file
View File

@ -0,0 +1,13 @@
Copyright (c) 2012-2017 Elastic <http://www.elastic.co>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,102 +1,125 @@
BUILD_DIR=build
COVERAGE_DIR=${BUILD_DIR}/coverage
BEATS=packetbeat filebeat winlogbeat metricbeat heartbeat
PROJECTS=libbeat ${BEATS}
BUILD_DIR=$(CURDIR)/build
COVERAGE_DIR=$(BUILD_DIR)/coverage
BEATS=packetbeat filebeat winlogbeat metricbeat heartbeat auditbeat
PROJECTS=libbeat $(BEATS)
PROJECTS_ENV=libbeat filebeat metricbeat
SNAPSHOT?=yes
PYTHON_ENV?=$(BUILD_DIR)/python-env
VIRTUALENV_PARAMS?=
FIND=find . -type f -not -path "*/vendor/*" -not -path "*/build/*" -not -path "*/.git/*"
GOLINT=golint
GOLINT_REPO=github.com/golang/lint/golint
REVIEWDOG=reviewdog
REVIEWDOG_OPTIONS?=-diff "git diff master"
REVIEWDOG_REPO=github.com/haya14busa/reviewdog/cmd/reviewdog
# Runs complete testsuites (unit, system, integration) for all beats with coverage and race detection.
# Also it builds the docs and the generators
.PHONY: testsuite
testsuite:
$(foreach var,$(PROJECTS),$(MAKE) -C $(var) testsuite || exit 1;)
#$(MAKE) -C generator test
@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) testsuite || exit 1;)
.PHONY: setup-commit-hook
setup-commit-hook:
@cp script/pre_commit.sh .git/hooks/pre-commit
@chmod 751 .git/hooks/pre-commit
stop-environments:
$(foreach var,$(PROJECTS_ENV),$(MAKE) -C $(var) stop-environment || exit 0;)
@$(foreach var,$(PROJECTS_ENV),$(MAKE) -C $(var) stop-environment || exit 0;)
# Runs unit and system tests without coverage and race detection.
.PHONY: test
test:
$(foreach var,$(PROJECTS),$(MAKE) -C $(var) test || exit 1;)
@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) test || exit 1;)
# Runs unit tests without coverage and race detection.
.PHONY: unit
unit:
$(foreach var,$(PROJECTS),$(MAKE) -C $(var) unit || exit 1;)
@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) unit || exit 1;)
.PHONY: coverage-report
coverage-report:
mkdir -p ${COVERAGE_DIR}
# Writes atomic mode on top of file
echo 'mode: atomic' > ./${COVERAGE_DIR}/full.cov
# Collects all coverage files and skips top line with mode
-tail -q -n +2 ./filebeat/${COVERAGE_DIR}/*.cov >> ./${COVERAGE_DIR}/full.cov
-tail -q -n +2 ./packetbeat/${COVERAGE_DIR}/*.cov >> ./${COVERAGE_DIR}/full.cov
-tail -q -n +2 ./winlogbeat/${COVERAGE_DIR}/*.cov >> ./${COVERAGE_DIR}/full.cov
-tail -q -n +2 ./libbeat/${COVERAGE_DIR}/*.cov >> ./${COVERAGE_DIR}/full.cov
go tool cover -html=./${COVERAGE_DIR}/full.cov -o ${COVERAGE_DIR}/full.html
@mkdir -p $(COVERAGE_DIR)
@echo 'mode: atomic' > ./$(COVERAGE_DIR)/full.cov
@# Collects all coverage files and skips top line with mode
@$(foreach var,$(PROJECTS),tail -q -n +2 ./$(var)/$(COVERAGE_DIR)/*.cov >> ./$(COVERAGE_DIR)/full.cov || true;)
@go tool cover -html=./$(COVERAGE_DIR)/full.cov -o $(COVERAGE_DIR)/full.html
@echo "Generated coverage report $(COVERAGE_DIR)/full.html"
.PHONY: update
update:
$(foreach var,$(PROJECTS),$(MAKE) -C $(var) update || exit 1;)
update: notice
@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) update || exit 1;)
@$(MAKE) -C deploy/kubernetes all
.PHONY: clean
clean:
rm -rf build
$(foreach var,$(PROJECTS),$(MAKE) -C $(var) clean || exit 1;)
$(MAKE) -C generator clean
@rm -rf build
@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) clean || exit 1;)
@$(MAKE) -C generator clean
# Cleans up the vendor directory from unnecessary files
# This should always be run after updating the dependencies
.PHONY: clean-vendor
clean-vendor:
sh script/clean_vendor.sh
@sh script/clean_vendor.sh
.PHONY: check
check:
$(foreach var,$(PROJECTS),$(MAKE) -C $(var) check || exit 1;)
# Validate that all updates were committed
$(MAKE) update
git update-index --refresh
git diff-index --exit-code HEAD --
check: python-env
@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) check || exit 1;)
@# Checks also python files which are not part of the beats
@$(FIND) -name *.py -exec $(PYTHON_ENV)/bin/autopep8 -d --max-line-length 120 {} \; | (! grep . -q) || (echo "Code differs from autopep8's style" && false)
@# Validate that all updates were committed
@$(MAKE) update
@git diff | cat
@git update-index --refresh
@git diff-index --exit-code HEAD --
# Corrects spelling errors
.PHONY: misspell
misspell:
go get github.com/client9/misspell
# Ignore Kibana files (.json)
$(FIND) -not -path "*.json" -name '*' -exec misspell -w {} \;
.PHONY: fmt
fmt:
$(foreach var,$(PROJECTS),$(MAKE) -C $(var) fmt || exit 1;)
fmt: python-env
@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) fmt || exit 1;)
@# Cleans also python files which are not part of the beats
@$(FIND) -name "*.py" -exec $(PYTHON_ENV)/bin/autopep8 --in-place --max-line-length 120 {} \;
.PHONY: simplify
simplify:
$(foreach var,$(PROJECTS),$(MAKE) -C $(var) simplify || exit 1;)
.PHONY: lint
lint:
@go get $(GOLINT_REPO) $(REVIEWDOG_REPO)
$(REVIEWDOG) $(REVIEWDOG_OPTIONS)
# Collects all dashboards and generates dashboard folder for https://github.com/elastic/beats-dashboards/tree/master/dashboards
.PHONY: beats-dashboards
beats-dashboards:
mkdir -p build/dashboards
$(foreach var,$(BEATS),cp -r $(var)/_meta/kibana/ build/dashboards/$(var) || exit 1;)
@mkdir -p build/dashboards
@$(foreach var,$(BEATS),cp -r $(var)/_meta/kibana/ build/dashboards/$(var) || exit 1;)
# Builds the documents for each beat
.PHONY: docs
docs:
sh libbeat/scripts/build_docs.sh ${PROJECTS}
@$(foreach var,$(PROJECTS),BUILD_DIR=${BUILD_DIR} $(MAKE) -C $(var) docs || exit 1;)
sh ./script/build_docs.sh dev-guide github.com/elastic/beats/docs/devguide ${BUILD_DIR}
.PHONY: package
package: update beats-dashboards
$(foreach var,$(BEATS),SNAPSHOT=$(SNAPSHOT) $(MAKE) -C $(var) package || exit 1;)
@$(foreach var,$(BEATS),SNAPSHOT=$(SNAPSHOT) $(MAKE) -C $(var) package || exit 1;)
# build the dashboards package
echo "Start building the dashboards package"
mkdir -p build/upload/
BUILD_DIR=${shell pwd}/build SNAPSHOT=$(SNAPSHOT) $(MAKE) -C dev-tools/packer package-dashboards ${shell pwd}/build/upload/build_id.txt
mv build/upload build/dashboards-upload
@echo "Start building the dashboards package"
@mkdir -p build/upload/
@BUILD_DIR=${BUILD_DIR} SNAPSHOT=$(SNAPSHOT) $(MAKE) -C dev-tools/packer package-dashboards ${BUILD_DIR}/upload/build_id.txt
@mv build/upload build/dashboards-upload
# Copy build files over to top build directory
mkdir -p build/upload/
$(foreach var,$(BEATS),cp -r $(var)/build/upload/ build/upload/$(var) || exit 1;)
cp -r build/dashboards-upload build/upload/dashboards
# Run tests on the generated packages.
go test ./dev-tools/package_test.go -files "${shell pwd}/build/upload/*/*"
@# Copy build files over to top build directory
@mkdir -p build/upload/
@$(foreach var,$(BEATS),cp -r $(var)/build/upload/ build/upload/$(var) || exit 1;)
@cp -r build/dashboards-upload build/upload/dashboards
@# Run tests on the generated packages.
@go test ./dev-tools/package_test.go -files "${BUILD_DIR}/upload/*/*"
# Upload nightly builds to S3
.PHONY: upload-nightlies-s3
@ -116,5 +139,17 @@ upload-release:
aws s3 cp --recursive --acl public-read build/upload s3://download.elasticsearch.org/beats/
.PHONY: notice
notice:
python dev-tools/generate_notice.py .
notice: python-env
@echo "Generating NOTICE"
@$(PYTHON_ENV)/bin/python dev-tools/generate_notice.py .
# Sets up the virtual python environment
.PHONY: python-env
python-env:
@test -d $(PYTHON_ENV) || virtualenv $(VIRTUALENV_PARAMS) $(PYTHON_ENV)
@$(PYTHON_ENV)/bin/pip install -q --upgrade pip autopep8 six
# Tests if apm works with the current code
.PHONY: test-apm
test-apm:
sh ./script/test_apm.sh

1994
vendor/github.com/elastic/beats/NOTICE generated vendored

File diff suppressed because it is too large Load Diff

3959
vendor/github.com/elastic/beats/NOTICE.txt generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -27,7 +27,7 @@ Beat | Description
[Winlogbeat](https://github.com/elastic/beats/tree/master/winlogbeat) | Fetches and ships Windows Event logs
In addition to the above Beats, which are officially supported by
[Elastic](elastic.co), the
[Elastic](https://elastic.co), the
community has created a set of other Beats that make use of libbeat but live
outside of this Github repository. We maintain a list of community Beats
[here](https://www.elastic.co/guide/en/beats/libbeat/master/community-beats.html).
@ -67,7 +67,7 @@ Please start by reading our [CONTRIBUTING](CONTRIBUTING.md) file.
If you are creating a new Beat, you don't need to submit the code to this
repository. You can simply start working in a new repository and make use of
the libbeat packages, by following our [developer
guide](https://www.elastic.co/guide/en/beats/libbeat/master/new-beat.html).
guide](https://www.elastic.co/guide/en/beats/libbeat/current/new-beat.html).
After you have a working prototype, open a pull request to add your Beat to the
list of [community
Beats](https://github.com/elastic/beats/blob/master/libbeat/docs/communitybeats.asciidoc).

View File

@ -55,6 +55,15 @@ cd ~/go/src/github.com/elastic
if [ -d "/vagrant" ]; then ln -s /vagrant beats; fi
SCRIPT
# Linux GVM
$linuxGvmProvision = <<SCRIPT
mkdir -p ~/bin
curl -sL -o ~/bin/gvm https://github.com/andrewkroh/gvm/releases/download/v0.0.1/gvm-linux-amd64
chmod +x ~/bin/gvm
echo 'export PATH=~/bin:$PATH' >> ~/.bash_profile
echo 'eval "$(gvm 1.9.2)"' >> ~/.bash_profile
SCRIPT
Vagrant.configure(2) do |config|
# Windows Server 2012 R2
@ -92,6 +101,7 @@ Vagrant.configure(2) do |config|
config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", :nfs => true, disabled: true
#config.vm.network "private_network", ip: "192.168.135.18"
freebsd.vm.hostname = "beats-tester"
freebsd.vm.provision "shell", inline: $unixProvision, privileged: false
end
@ -109,6 +119,18 @@ Vagrant.configure(2) do |config|
openbsd.vm.provision "shell", inline: $unixProvision, privileged: false
end
# CentOS 7
config.vm.define "centos7", primary: true do |centos7|
#centos7.vm.box = "http://cloud.centos.org/centos/7/vagrant/x86_64/images/CentOS-7-x86_64-Vagrant-1706_02.VirtualBox.box"
centos7.vm.box = "ubuntu/precise64"
centos7.vm.network :forwarded_port, guest: 22, host: 2226, id: "ssh", auto_correct: true
centos7.vm.provision "shell", inline: $unixProvision, privileged: false
centos7.vm.provision "shell", inline: $linuxGvmProvision, privileged: false
centos7.vm.synced_folder ".", "/vagrant", type: "virtualbox"
end
end
# -*- mode: ruby -*-

10
vendor/github.com/elastic/beats/auditbeat/.gitignore generated vendored Normal file
View File

@ -0,0 +1,10 @@
build
_meta/kibana
_meta/beat.yml
_meta/beat.reference.yml
module/*/_meta/config.yml
/auditbeat
/auditbeat.test
/docs/html_docs

77
vendor/github.com/elastic/beats/auditbeat/Makefile generated vendored Normal file
View File

@ -0,0 +1,77 @@
BEAT_NAME=auditbeat
BEAT_TITLE=Auditbeat
BEAT_DESCRIPTION=Audit the activities of users and processes on your system.
SYSTEM_TESTS=false
TEST_ENVIRONMENT=false
# Path to the libbeat Makefile
-include ../libbeat/scripts/Makefile
# This is called by the beats packer before building starts
.PHONY: before-build
before-build:
@cat ${ES_BEATS}/auditbeat/_meta/common.p1.yml \
<(go run scripts/generate_config.go -os windows -concat) \
${ES_BEATS}/auditbeat/_meta/common.p2.yml \
${ES_BEATS}/libbeat/_meta/config.yml > \
${PREFIX}/${BEAT_NAME}-win.yml
@cat ${ES_BEATS}/auditbeat/_meta/common.reference.yml \
<(go run scripts/generate_config.go -os windows -concat) \
${ES_BEATS}/libbeat/_meta/config.reference.yml > \
${PREFIX}/${BEAT_NAME}-win.reference.yml
@cat ${ES_BEATS}/auditbeat/_meta/common.p1.yml \
<(go run scripts/generate_config.go -os darwin -concat) \
${ES_BEATS}/auditbeat/_meta/common.p2.yml \
${ES_BEATS}/libbeat/_meta/config.yml > \
${PREFIX}/${BEAT_NAME}-darwin.yml
@cat ${ES_BEATS}/auditbeat/_meta/common.reference.yml \
<(go run scripts/generate_config.go -os darwin -concat) \
${ES_BEATS}/libbeat/_meta/config.reference.yml > \
${PREFIX}/${BEAT_NAME}-darwin.reference.yml
@cat ${ES_BEATS}/auditbeat/_meta/common.p1.yml \
<(go run scripts/generate_config.go -os linux -concat) \
${ES_BEATS}/auditbeat/_meta/common.p2.yml \
${ES_BEATS}/libbeat/_meta/config.yml > \
${PREFIX}/${BEAT_NAME}-linux.yml
@cat ${ES_BEATS}/auditbeat/_meta/common.reference.yml \
<(go run scripts/generate_config.go -os linux -concat) \
${ES_BEATS}/libbeat/_meta/config.reference.yml > \
${PREFIX}/${BEAT_NAME}-linux.reference.yml
# Collects all dependencies and then calls update
.PHONY: collect
collect: fields collect-docs configs kibana
# Collects all module and metricset fields
.PHONY: fields
fields: python-env
@mkdir -p _meta
@cp ${ES_BEATS}/metricbeat/_meta/fields.common.yml _meta/fields.generated.yml
@${PYTHON_ENV}/bin/python ${ES_BEATS}/metricbeat/scripts/fields_collector.py >> _meta/fields.generated.yml
# Collects all module configs
.PHONY: configs
configs: python-env
@cat ${ES_BEATS}/auditbeat/_meta/common.p1.yml \
<(go run scripts/generate_config.go -os linux -concat) \
${ES_BEATS}/auditbeat/_meta/common.p2.yml > _meta/beat.yml
@cat ${ES_BEATS}/auditbeat/_meta/common.reference.yml \
<(go run scripts/generate_config.go -os linux -ref -concat) > _meta/beat.reference.yml
# Collects all module docs
.PHONY: collect-docs
collect-docs: python-env
@rm -rf docs/modules
@mkdir -p docs/modules
@go run scripts/generate_config.go -os linux
@${PYTHON_ENV}/bin/python ${ES_BEATS}/auditbeat/scripts/docs_collector.py --beat ${BEAT_NAME}
# Collects all module dashboards
.PHONY: kibana
kibana:
@-rm -rf _meta/kibana/dashboard _meta/kibana/search _meta/kibana/visualization # Skip index-pattern
@mkdir -p _meta/kibana
@-cp -pr module/*/_meta/kibana _meta/

View File

@ -0,0 +1,12 @@
###################### Auditbeat Configuration Example #########################
# This is an example configuration file highlighting only the most common
# options. The auditbeat.reference.yml file from the same directory contains all
# the supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/auditbeat/index.html
#========================== Modules configuration =============================
auditbeat.modules:

View File

@ -0,0 +1,6 @@
#==================== Elasticsearch template setting ==========================
setup.template.settings:
index.number_of_shards: 3
#index.codec: best_compression
#_source.enabled: false

View File

@ -0,0 +1,31 @@
########################## Auditbeat Configuration #############################
# This is a reference configuration file documenting all non-deprecated options
# in comments. For a shorter configuration example that contains only the most
# common options, please see auditbeat.yml in the same directory.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/auditbeat/index.html
#============================ Config Reloading ================================
# Config reloading allows to dynamically load modules. Each file which is
# monitored must contain one or multiple modules as a list.
auditbeat.config.modules:
# Glob pattern for configuration reloading
path: ${path.config}/conf.d/*.yml
# Period on which files under path should be checked for changes
reload.period: 10s
# Set to true to enable config reloading
reload.enabled: false
# Maximum amount of time to randomly delay the start of a metricset. Use 0 to
# disable startup delay.
auditbeat.max_start_delay: 10s
#========================== Modules configuration =============================
auditbeat.modules:

View File

@ -0,0 +1,36 @@
- key: common
title: Common
description: >
Contains common fields available in all event types.
fields:
- name: metricset.module
description: >
The name of the module that generated the event.
- name: metricset.name
description: >
The name of the metricset that generated the event.
- name: metricset.host
description: >
Hostname of the machine from which the metricset was collected. This
field may not be present when the data was collected locally.
- name: metricset.rtt
type: long
required: true
description: >
Event round trip time in microseconds.
- name: metricset.namespace
type: keyword
description: >
Namespace of dynamic metricsets.
- name: type
required: true
example: metricsets
description: >
The document type. Always set to "metricsets".

View File

@ -0,0 +1,872 @@
########################## Auditbeat Configuration #############################
# This is a reference configuration file documenting all non-deprecated options
# in comments. For a shorter configuration example that contains only the most
# common options, please see auditbeat.yml in the same directory.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/auditbeat/index.html
#============================ Config Reloading ================================
# Config reloading allows to dynamically load modules. Each file which is
# monitored must contain one or multiple modules as a list.
auditbeat.config.modules:
# Glob pattern for configuration reloading
path: ${path.config}/conf.d/*.yml
# Period on which files under path should be checked for changes
reload.period: 10s
# Set to true to enable config reloading
reload.enabled: false
# Maximum amount of time to randomly delay the start of a metricset. Use 0 to
# disable startup delay.
auditbeat.max_start_delay: 10s
#========================== Modules configuration =============================
auditbeat.modules:
# The kernel metricset collects events from the audit framework in the Linux
# kernel. You need to specify audit rules for the events that you want to audit.
- module: audit
metricsets: [kernel]
kernel.resolve_ids: true
kernel.failure_mode: silent
kernel.backlog_limit: 8196
kernel.rate_limit: 0
kernel.include_raw_message: false
kernel.include_warnings: false
kernel.audit_rules: |
## Define audit rules here.
## Create file watches (-w) or syscall audits (-a or -A). Uncomment these
## examples or add your own rules.
## If you are on a 64 bit platform, everything should be running
## in 64 bit mode. This rule will detect any use of the 32 bit syscalls
## because this might be a sign of someone exploiting a hole in the 32
## bit API.
#-a always,exit -F arch=b32 -S all -F key=32bit-abi
## Executions.
#-a always,exit -F arch=b64 -S execve,execveat -k exec
## External access.
#-a always,exit -F arch=b64 -S accept,bind,connect,recvfrom -F key=external-access
## Identity changes.
#-w /etc/group -p wa -k identity
#-w /etc/passwd -p wa -k identity
#-w /etc/gshadow -p wa -k identity
## Unauthorized access attempts.
#-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access
#-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access
# The file integrity metricset sends events when files are changed (created,
# updated, deleted). The events contain file metadata and hashes.
- module: audit
metricsets: [file]
file.paths:
- /bin
- /usr/bin
- /sbin
- /usr/sbin
- /etc
# Scan over the configured file paths at startup and send events for new or
# modified files since the last time Auditbeat was running.
file.scan_at_start: true
# Average scan rate. This throttles the amount of CPU and I/O that Auditbeat
# consumes at startup while scanning. Default is "50 MiB".
file.scan_rate_per_sec: 50 MiB
# Limit on the size of files that will be hashed. Default is "100 MiB".
file.max_file_size: 100 MiB
# Hash types to compute when the file changes. Supported types are md5, sha1,
# sha224, sha256, sha384, sha512, sha512_224, sha512_256, sha3_224, sha3_256,
# sha3_384 and sha3_512. Default is sha1.
file.hash_types: [sha1]
#================================ General ======================================
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
# If this option is not defined, the hostname is used.
#name:
# The tags of the shipper are included in their own field with each
# transaction published. Tags make it easy to group servers by different
# logical properties.
#tags: ["service-X", "web-tier"]
# Optional fields that you can specify to add additional information to the
# output. Fields can be scalar values, arrays, dictionaries, or any nested
# combination of these.
#fields:
# env: staging
# If this option is set to true, the custom fields are stored as top-level
# fields in the output document instead of being grouped under a fields
# sub-dictionary. Default is false.
#fields_under_root: false
# Internal queue configuration for buffering events to be published.
#queue:
# Queue type by name (default 'mem')
# The memory queue will present all available events (up to the outputs
# bulk_max_size) to the output, the moment the output is ready to serve
# another batch of events.
#mem:
# Max number of events the queue can buffer.
#events: 4096
# Hints the minimum number of events stored in the queue,
# before providing a batch of events to the outputs.
# A value of 0 (the default) ensures events are immediately available
# to be sent to the outputs.
#flush.min_events: 2048
# Maximum duration after which events are available to the outputs,
# if the number of events stored in the queue is < flush.min_events.
#flush.timeout: 1s
# Sets the maximum number of CPUs that can be executing simultaneously. The
# default is the number of logical CPUs available in the system.
#max_procs:
#================================ Processors ===================================
# Processors are used to reduce the number of fields in the exported event or to
# enhance the event with external metadata. This section defines a list of
# processors that are applied one by one and the first one receives the initial
# event:
#
# event -> filter1 -> event1 -> filter2 ->event2 ...
#
# The supported processors are drop_fields, drop_event, include_fields, and
# add_cloud_metadata.
#
# For example, you can use the following processors to keep the fields that
# contain CPU load percentages, but remove the fields that contain CPU ticks
# values:
#
#processors:
#- include_fields:
# fields: ["cpu"]
#- drop_fields:
# fields: ["cpu.user", "cpu.system"]
#
# The following example drops the events that have the HTTP response code 200:
#
#processors:
#- drop_event:
# when:
# equals:
# http.code: 200
#
# The following example enriches each event with metadata from the cloud
# provider about the host machine. It works on EC2, GCE, DigitalOcean,
# Tencent Cloud, and Alibaba Cloud.
#
#processors:
#- add_cloud_metadata: ~
#
# The following example enriches each event with the machine's local time zone
# offset from UTC.
#
#processors:
#- add_locale:
# format: offset
#
# The following example enriches each event with docker metadata, it matches
# given fields to an existing container id and adds info from that container:
#
#processors:
#- add_docker_metadata:
# host: "unix:///var/run/docker.sock"
# match_fields: ["system.process.cgroup.id"]
# # To connect to Docker over TLS you must specify a client and CA certificate.
# #ssl:
# # certificate_authority: "/etc/pki/root/ca.pem"
# # certificate: "/etc/pki/client/cert.pem"
# # key: "/etc/pki/client/cert.key"
#
# The following example enriches each event with docker metadata, it matches
# container id from log path available in `source` field (by default it expects
# it to be /var/lib/docker/containers/*/*.log).
#
#processors:
#- add_docker_metadata: ~
#============================= Elastic Cloud ==================================
# These settings simplify using auditbeat with the Elastic Cloud (https://cloud.elastic.co/).
# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:
# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
#================================ Outputs ======================================
# Configure what output to use when sending the data collected by the beat.
#-------------------------- Elasticsearch output -------------------------------
output.elasticsearch:
# Boolean flag to enable or disable the output module.
#enabled: true
# Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (http and 9200)
# In case you specify an additional path, the scheme is required: http://localhost:9200/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
hosts: ["localhost:9200"]
# Set gzip compression level.
#compression_level: 0
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "elastic"
#password: "changeme"
# Dictionary of HTTP parameters to pass within the url with index operations.
#parameters:
#param1: value1
#param2: value2
# Number of workers per Elasticsearch host.
#worker: 1
# Optional index name. The default is "auditbeat" plus date
# and generates [auditbeat-]YYYY.MM.DD keys.
# In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
#index: "auditbeat-%{[beat.version]}-%{+yyyy.MM.dd}"
# Optional ingest node pipeline. By default no pipeline will be used.
#pipeline: ""
# Optional HTTP Path
#path: "/elasticsearch"
# Custom HTTP headers to add to each request
#headers:
# X-My-Header: Contents of the header
# Proxy server url
#proxy_url: http://proxy:3128
# The number of times a particular Elasticsearch index operation is attempted. If
# the indexing operation doesn't succeed after this many retries, the events are
# dropped. The default is 3.
#max_retries: 3
# The maximum number of events to bulk in a single Elasticsearch bulk API index request.
# The default is 50.
#bulk_max_size: 50
# Configure HTTP request timeout before failing a request to Elasticsearch.
#timeout: 90
# Use SSL settings for HTTPS. Default is true.
#ssl.enabled: true
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# SSL configuration. By default is off.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#----------------------------- Logstash output ---------------------------------
#output.logstash:
# Boolean flag to enable or disable the output module.
#enabled: true
# The Logstash hosts
#hosts: ["localhost:5044"]
# Number of workers per Logstash host.
#worker: 1
# Set gzip compression level.
#compression_level: 3
# Optional maximum time to live for a connection to Logstash, after which the
# connection will be re-established. A value of `0s` (the default) will
# disable this feature.
#
# Not yet supported for async connections (i.e. with the "pipelining" option set)
#ttl: 30s
# Optional load balance the events between the Logstash hosts. Default is false.
#loadbalance: false
# Number of batches to be sent asynchronously to logstash while processing
# new batches.
#pipelining: 5
# If enabled only a subset of events in a batch of events is transferred per
# transaction. The number of events to be sent increases up to `bulk_max_size`
# if no error is encountered.
#slow_start: false
# Optional index name. The default index name is set to auditbeat
# in all lowercase.
#index: 'auditbeat'
# SOCKS5 proxy server URL
#proxy_url: socks5://user:password@socks5-server:2233
# Resolve names locally when using a proxy server. Defaults to false.
#proxy_use_local_resolver: false
# Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
#ssl.enabled: true
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# Optional SSL configuration options. SSL is off by default.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#------------------------------- Kafka output ----------------------------------
#output.kafka:
# Boolean flag to enable or disable the output module.
#enabled: true
# The list of Kafka broker addresses from where to fetch the cluster metadata.
# The cluster metadata contain the actual Kafka brokers events are published
# to.
#hosts: ["localhost:9092"]
# The Kafka topic used for produced events. The setting can be a format string
# using any event field. To set the topic from document type use `%{[type]}`.
#topic: beats
# The Kafka event key setting. Use format string to create unique event key.
# By default no event key will be generated.
#key: ''
# The Kafka event partitioning strategy. Default hashing strategy is `hash`
# using the `output.kafka.key` setting or randomly distributes events if
# `output.kafka.key` is not configured.
#partition.hash:
# If enabled, events will only be published to partitions with reachable
# leaders. Default is false.
#reachable_only: false
# Configure alternative event field names used to compute the hash value.
# If empty `output.kafka.key` setting will be used.
# Default value is empty list.
#hash: []
# Authentication details. Password is required if username is set.
#username: ''
#password: ''
# Kafka version auditbeat is assumed to run against. Defaults to the oldest
# supported stable version (currently version 0.8.2.0)
#version: 0.8.2
# Metadata update configuration. Metadata do contain leader information
# deciding which broker to use when publishing.
#metadata:
# Max metadata request retry attempts when cluster is in middle of leader
# election. Defaults to 3 retries.
#retry.max: 3
# Waiting time between retries during leader elections. Default is 250ms.
#retry.backoff: 250ms
# Refresh metadata interval. Defaults to every 10 minutes.
#refresh_frequency: 10m
# The number of concurrent load-balanced Kafka output workers.
#worker: 1
# The number of times to retry publishing an event after a publishing failure.
# After the specified number of retries, the events are typically dropped.
# Some Beats, such as Filebeat, ignore the max_retries setting and retry until
# all events are published. Set max_retries to a value less than 0 to retry
# until all events are published. The default is 3.
#max_retries: 3
# The maximum number of events to bulk in a single Kafka request. The default
# is 2048.
#bulk_max_size: 2048
# The number of seconds to wait for responses from the Kafka brokers before
# timing out. The default is 30s.
#timeout: 30s
# The maximum duration a broker will wait for number of required ACKs. The
# default is 10s.
#broker_timeout: 10s
# The number of messages buffered for each Kafka broker. The default is 256.
#channel_buffer_size: 256
# The keep-alive period for an active network connection. If 0s, keep-alives
# are disabled. The default is 0 seconds.
#keep_alive: 0
# Sets the output compression codec. Must be one of none, snappy and gzip. The
# default is gzip.
#compression: gzip
# The maximum permitted size of JSON-encoded messages. Bigger messages will be
# dropped. The default value is 1000000 (bytes). This value should be equal to
# or less than the broker's message.max.bytes.
#max_message_bytes: 1000000
# The ACK reliability level required from broker. 0=no response, 1=wait for
# local commit, -1=wait for all replicas to commit. The default is 1. Note:
# If set to 0, no ACKs are returned by Kafka. Messages might be lost silently
# on error.
#required_acks: 1
# The configurable ClientID used for logging, debugging, and auditing
# purposes. The default is "beats".
#client_id: beats
# Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
#ssl.enabled: true
# Optional SSL configuration options. SSL is off by default.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#------------------------------- Redis output ----------------------------------
#output.redis:
# Boolean flag to enable or disable the output module.
#enabled: true
# The list of Redis servers to connect to. If load balancing is enabled, the
# events are distributed to the servers in the list. If one server becomes
# unreachable, the events are distributed to the reachable servers only.
#hosts: ["localhost:6379"]
# The Redis port to use if hosts does not contain a port number. The default
# is 6379.
#port: 6379
# The name of the Redis list or channel the events are published to. The
# default is auditbeat.
#key: auditbeat
# The password to authenticate with. The default is no authentication.
#password:
# The Redis database number where the events are published. The default is 0.
#db: 0
# The Redis data type to use for publishing events. If the data type is list,
# the Redis RPUSH command is used. If the data type is channel, the Redis
# PUBLISH command is used. The default value is list.
#datatype: list
# The number of workers to use for each host configured to publish events to
# Redis. Use this setting along with the loadbalance option. For example, if
# you have 2 hosts and 3 workers, in total 6 workers are started (3 for each
# host).
#worker: 1
# If set to true and multiple hosts or workers are configured, the output
# plugin load balances published events onto all Redis hosts. If set to false,
# the output plugin sends all events to only one host (determined at random)
# and will switch to another host if the currently selected one becomes
# unreachable. The default value is true.
#loadbalance: true
# The Redis connection timeout in seconds. The default is 5 seconds.
#timeout: 5s
# The number of times to retry publishing an event after a publishing failure.
# After the specified number of retries, the events are typically dropped.
# Some Beats, such as Filebeat, ignore the max_retries setting and retry until
# all events are published. Set max_retries to a value less than 0 to retry
# until all events are published. The default is 3.
#max_retries: 3
# The maximum number of events to bulk in a single Redis request or pipeline.
# The default is 2048.
#bulk_max_size: 2048
# The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The
# value must be a URL with a scheme of socks5://.
#proxy_url:
# This option determines whether Redis hostnames are resolved locally when
# using a proxy. The default value is false, which means that name resolution
# occurs on the proxy server.
#proxy_use_local_resolver: false
# Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
#ssl.enabled: true
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# Optional SSL configuration options. SSL is off by default.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#------------------------------- File output -----------------------------------
#output.file:
# Boolean flag to enable or disable the output module.
#enabled: true
# Path to the directory where to save the generated files. The option is
# mandatory.
#path: "/tmp/auditbeat"
# Name of the generated files. The default is `auditbeat` and it generates
# files: `auditbeat`, `auditbeat.1`, `auditbeat.2`, etc.
#filename: auditbeat
# Maximum size in kilobytes of each file. When this size is reached, and on
# every auditbeat restart, the files are rotated. The default value is 10240
# kB.
#rotate_every_kb: 10000
# Maximum number of files under path. When this number of files is reached,
# the oldest file is deleted and the rest are shifted from last to first. The
# default is 7 files.
#number_of_files: 7
# Permissions to use for file creation. The default is 0600.
#permissions: 0600
#----------------------------- Console output ---------------------------------
#output.console:
# Boolean flag to enable or disable the output module.
#enabled: true
# Pretty print json event
#pretty: false
#================================= Paths ======================================
# The home path for the auditbeat installation. This is the default base path
# for all other path settings and for miscellaneous files that come with the
# distribution (for example, the sample dashboards).
# If not set by a CLI flag or in the configuration file, the default for the
# home path is the location of the binary.
#path.home:
# The configuration path for the auditbeat installation. This is the default
# base path for configuration files, including the main YAML configuration file
# and the Elasticsearch template file. If not set by a CLI flag or in the
# configuration file, the default for the configuration path is the home path.
#path.config: ${path.home}
# The data path for the auditbeat installation. This is the default base path
# for all the files in which auditbeat needs to store its data. If not set by a
# CLI flag or in the configuration file, the default for the data path is a data
# subdirectory inside the home path.
#path.data: ${path.home}/data
# The logs path for a auditbeat installation. This is the default location for
# the Beat's log files. If not set by a CLI flag or in the configuration file,
# the default for the logs path is a logs subdirectory inside the home path.
#path.logs: ${path.home}/logs
#============================== Dashboards =====================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards are disabled by default and can be enabled either by setting the
# options here, or by using the `-setup` CLI flag or the `setup` command.
#setup.dashboards.enabled: false
# The directory from where to read the dashboards. The default is the `kibana`
# folder in the home path.
#setup.dashboards.directory: ${path.home}/kibana
# The URL from where to download the dashboards archive. It is used instead of
# the directory if it has a value.
#setup.dashboards.url:
# The file archive (zip file) from where to read the dashboards. It is used instead
# of the directory when it has a value.
#setup.dashboards.file:
# In case the archive contains the dashboards from multiple Beats, this lets you
# select which one to load. You can load all the dashboards in the archive by
# setting this to the empty string.
#setup.dashboards.beat: auditbeat
# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
#setup.dashboards.kibana_index: .kibana
# The Elasticsearch index name. This overwrites the index name defined in the
# dashboards and index pattern. Example: testbeat-*
#setup.dashboards.index:
# Always use the Kibana API for loading the dashboards instead of autodetecting
# how to install the dashboards by first querying Elasticsearch.
#setup.dashboards.always_kibana: false
#============================== Template =====================================
# A template is used to set the mapping in Elasticsearch
# By default template loading is enabled and the template is loaded.
# These settings can be adjusted to load your own template or overwrite existing ones.
# Set to false to disable template loading.
#setup.template.enabled: true
# Template name. By default the template name is "auditbeat-%{[beat.version]}"
# The template name and pattern has to be set in case the elasticsearch index pattern is modified.
#setup.template.name: "auditbeat-%{[beat.version]}"
# Template pattern. By default the template pattern is "-%{[beat.version]}-*" to apply to the default index settings.
# The first part is the version of the beat and then -* is used to match all daily indices.
# The template name and pattern has to be set in case the elasticsearch index pattern is modified.
#setup.template.pattern: "auditbeat-%{[beat.version]}-*"
# Path to fields.yml file to generate the template
#setup.template.fields: "${path.config}/fields.yml"
# Overwrite existing template
#setup.template.overwrite: false
# Elasticsearch template settings
setup.template.settings:
# A dictionary of settings to place into the settings.index dictionary
# of the Elasticsearch template. For more details, please check
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
#index:
#number_of_shards: 1
#codec: best_compression
#number_of_routing_shards: 30
# A dictionary of settings for the _source field. For more details, please check
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
#_source:
#enabled: false
#============================== Kibana =====================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:
# Kibana Host
# Scheme and port can be left out and will be set to the default (http and 5601)
# In case you specify an additional path, the scheme is required: http://localhost:5601/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
#host: "localhost:5601"
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "elastic"
#password: "changeme"
# Optional HTTP Path
#path: ""
# Use SSL settings for HTTPS. Default is true.
#ssl.enabled: true
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# SSL configuration. By default is off.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites
#ssl.curve_types: []
#================================ Logging ======================================
# There are three options for the log output: syslog, file, stderr.
# Under Windows systems, the log files are per default sent to the file output,
# under all other system per default to syslog.
# Sets log level. The default log level is info.
# Available log levels are: critical, error, warning, info, debug
#logging.level: info
# Enable debug output for selected components. To enable all selectors use ["*"]
# Other available selectors are "beat", "publish", "service"
# Multiple selectors can be chained.
#logging.selectors: [ ]
# Send all logging output to syslog. The default is false.
#logging.to_syslog: true
# If enabled, auditbeat periodically logs its internal metrics that have changed
# in the last period. For each metric that changed, the delta from the value at
# the beginning of the period is logged. Also, the total values for
# all non-zero internal metrics are logged on shutdown. The default is true.
#logging.metrics.enabled: true
# The period after which to log the internal metrics. The default is 30s.
#logging.metrics.period: 30s
# Logging to rotating files. Set logging.to_files to false to disable logging to
# files.
logging.to_files: true
logging.files:
# Configure the path where the logs are written. The default is the logs directory
# under the home path (the binary location).
#path: /var/log/auditbeat
# The name of the files where the logs are written to.
#name: auditbeat
# Configure log file size limit. If limit is reached, log file will be
# automatically rotated
#rotateeverybytes: 10485760 # = 10MB
# Number of rotated log files to keep. Oldest files will be deleted first.
#keepfiles: 7
# The permissions mask to apply when rotating log files. The default value is 0600.
# Must be a valid Unix-style file permissions mask expressed in octal notation.
#permissions: 0600
# Set to true to log messages in json format.
#logging.json: false

149
vendor/github.com/elastic/beats/auditbeat/auditbeat.yml generated vendored Normal file
View File

@ -0,0 +1,149 @@
###################### Auditbeat Configuration Example #########################
# This is an example configuration file highlighting only the most common
# options. The auditbeat.reference.yml file from the same directory contains all
# the supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/auditbeat/index.html
#========================== Modules configuration =============================
auditbeat.modules:
- module: audit
metricsets: [kernel]
kernel.audit_rules: |
## Define audit rules here.
## Create file watches (-w) or syscall audits (-a or -A). Uncomment these
## examples or add your own rules.
## If you are on a 64 bit platform, everything should be running
## in 64 bit mode. This rule will detect any use of the 32 bit syscalls
## because this might be a sign of someone exploiting a hole in the 32
## bit API.
#-a always,exit -F arch=b32 -S all -F key=32bit-abi
## Executions.
#-a always,exit -F arch=b64 -S execve,execveat -k exec
## External access.
#-a always,exit -F arch=b64 -S accept,bind,connect,recvfrom -F key=external-access
## Identity changes.
#-w /etc/group -p wa -k identity
#-w /etc/passwd -p wa -k identity
#-w /etc/gshadow -p wa -k identity
## Unauthorized access attempts.
#-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access
#-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access
- module: audit
metricsets: [file]
file.paths:
- /bin
- /usr/bin
- /sbin
- /usr/sbin
- /etc
#==================== Elasticsearch template setting ==========================
setup.template.settings:
index.number_of_shards: 3
#index.codec: best_compression
#_source.enabled: false
#================================ General =====================================
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:
# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]
# Optional fields that you can specify to add additional information to the
# output.
#fields:
# env: staging
#============================== Dashboards =====================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here, or by using the `-setup` CLI flag or the `setup` command.
#setup.dashboards.enabled: false
# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:
#============================== Kibana =====================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:
# Kibana Host
# Scheme and port can be left out and will be set to the default (http and 5601)
# In case you specify an additional path, the scheme is required: http://localhost:5601/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
#host: "localhost:5601"
#============================= Elastic Cloud ==================================
# These settings simplify using auditbeat with the Elastic Cloud (https://cloud.elastic.co/).
# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:
# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
#================================ Outputs =====================================
# Configure what output to use when sending the data collected by the beat.
#-------------------------- Elasticsearch output ------------------------------
output.elasticsearch:
# Array of hosts to connect to.
hosts: ["localhost:9200"]
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "elastic"
#password: "changeme"
#----------------------------- Logstash output --------------------------------
#output.logstash:
# The Logstash hosts
#hosts: ["localhost:5044"]
# Optional SSL. By default is off.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
#================================ Logging =====================================
# Sets log level. The default log level is info.
# Available log levels are: critical, error, warning, info, debug
#logging.level: debug
# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
#logging.selectors: ["*"]

20
vendor/github.com/elastic/beats/auditbeat/cmd/root.go generated vendored Normal file
View File

@ -0,0 +1,20 @@
// Package cmd provides the root command used to run auditbeat.
package cmd

import (
	"github.com/spf13/pflag"

	// NOTE(review): auditbeat reuses the metricbeat beater implementation to
	// drive its modules/metricsets — presumably intentional; confirm upstream.
	"github.com/elastic/beats/metricbeat/beater"

	cmd "github.com/elastic/beats/libbeat/cmd"
)

// Name of the beat (auditbeat).
const Name = "auditbeat"

// RootCmd for running auditbeat.
var RootCmd *cmd.BeatsRootCmd

// init builds the root command with an empty extra run-flag set. The second
// argument (empty string) is presumably a version/index override that falls
// back to the libbeat default — confirm against the libbeat cmd API.
func init() {
	var runFlags = pflag.NewFlagSet(Name, pflag.ExitOnError)
	RootCmd = cmd.GenRootCmdWithRunFlags(Name, "", beater.New, runFlags)
}

View File

@ -0,0 +1,174 @@
// Package datastore provides a lazily opened, reference-counted Bolt database
// for persisting beat state under the configured data path.
package datastore

import (
	"io"
	"os"
	"sync"

	"github.com/boltdb/bolt"

	"github.com/elastic/beats/libbeat/paths"
)

var (
	// initDatastoreOnce guards one-time construction of the shared datastore.
	initDatastoreOnce sync.Once
	// ds is the process-wide datastore backing the package-level OpenBucket.
	ds *boltDatastore
)
// OpenBucket returns a new Bucket that stores data in {path.data}/beat.db.
// The returned Bucket must be closed when finished to ensure all resources
// are released.
func OpenBucket(name string) (Bucket, error) {
	initDatastoreOnce.Do(initSharedDatastore)
	return ds.OpenBucket(name)
}

// initSharedDatastore lazily constructs the process-wide Bolt datastore.
func initSharedDatastore() {
	ds = &boltDatastore{
		path: paths.Resolve(paths.Data, "beat.db"),
		mode: 0600,
	}
}
// Datastore is a factory for named Buckets that share one backing store.
type Datastore interface {
	// OpenBucket opens the named bucket, creating it if necessary.
	OpenBucket(name string) (Bucket, error)
}
// boltDatastore is a Datastore backed by a single Bolt database file that is
// shared across buckets.
type boltDatastore struct {
	mutex    sync.Mutex  // guards the fields below
	useCount uint32      // number of open buckets referencing db
	path     string      // filesystem path of the Bolt database file
	mode     os.FileMode // file mode used when creating the database file
	db       *bolt.DB    // lazily opened database; nil when not open
}
// New creates a Datastore backed by a Bolt database located at path, created
// with the given file mode on first use.
func New(path string, mode os.FileMode) Datastore {
	store := &boltDatastore{
		path: path,
		mode: mode,
	}
	return store
}
// OpenBucket opens the named bucket in the Bolt database, lazily opening the
// database file and creating the bucket on first use. Each successful call
// takes a reference on the datastore; the reference is released when the
// returned Bucket is closed, and the database is closed when the last
// reference is released.
func (ds *boltDatastore) OpenBucket(bucket string) (Bucket, error) {
	ds.mutex.Lock()
	defer ds.mutex.Unlock()

	// Initialize the Bolt DB.
	if ds.db == nil {
		var err error
		ds.db, err = bolt.Open(ds.path, ds.mode, nil)
		if err != nil {
			return nil, err
		}
	}

	// Ensure the name exists.
	err := ds.db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte(bucket))
		return err
	})
	if err != nil {
		return nil, err
	}

	// BUG FIX: take a reference for the returned bucket. Previously the
	// count was never incremented, so done() could never reach zero and the
	// database was never closed.
	ds.useCount++

	return &boltBucket{ds, bucket}, nil
}
// done releases one reference on the datastore and closes the underlying
// Bolt database when the reference count drops to zero. It is invoked by
// boltBucket.Close.
func (ds *boltDatastore) done() {
	ds.mutex.Lock()
	defer ds.mutex.Unlock()

	if ds.useCount > 0 {
		ds.useCount--
		if ds.useCount == 0 {
			// Last user is gone; release the file handle and allow a later
			// OpenBucket to reopen the database.
			ds.db.Close()
			ds.db = nil
		}
	}
}
// Bucket is a closable key/value namespace within the datastore.
type Bucket interface {
	io.Closer

	// Load reads the value stored under key and passes it to f. If the key
	// does not exist, f is not invoked and nil is returned.
	Load(key string, f func(blob []byte) error) error

	// Store writes blob under key.
	Store(key string, blob []byte) error

	// Delete removes a key from the bucket. If the key does not exist then
	// nothing is done and a nil error is returned.
	Delete(key string) error

	// DeleteBucket deletes and closes the bucket.
	DeleteBucket() error
}
// BoltBucket is a Bucket that exposes some Bolt specific APIs.
type BoltBucket interface {
	Bucket

	// View runs f in a read-only transaction scoped to this bucket.
	View(func(tx *bolt.Bucket) error) error

	// Update runs f in a writable transaction scoped to this bucket.
	Update(func(tx *bolt.Bucket) error) error
}
// boltBucket is a Bucket bound to a named Bolt bucket within a shared
// boltDatastore.
type boltBucket struct {
	ds   *boltDatastore // owning datastore; set to nil on Close
	name string         // Bolt bucket name
}
// Load reads the value stored under key and passes it to f inside a read-only
// transaction. If the key does not exist, f is not invoked and nil is
// returned. The blob is only valid for the duration of the callback.
func (b *boltBucket) Load(key string, f func(blob []byte) error) error {
	return b.ds.db.View(func(tx *bolt.Tx) error {
		// Renamed from b to bkt: the original shadowed the method receiver.
		bkt := tx.Bucket([]byte(b.name))
		data := bkt.Get([]byte(key))
		if data == nil {
			return nil
		}
		return f(data)
	})
}
// Store writes blob under key inside a writable transaction.
func (b *boltBucket) Store(key string, blob []byte) error {
	return b.ds.db.Update(func(tx *bolt.Tx) error {
		// Renamed from b to bkt: the original shadowed the method receiver.
		bkt := tx.Bucket([]byte(b.name))
		return bkt.Put([]byte(key), blob)
	})
}
// ForEach invokes f for every key/value pair in the bucket inside a read-only
// transaction. Iteration stops at the first error returned by f, which is
// propagated to the caller. The blob is only valid during the callback.
func (b *boltBucket) ForEach(f func(key string, blob []byte) error) error {
	return b.ds.db.View(func(tx *bolt.Tx) error {
		// Renamed from b to bkt: the original shadowed the method receiver.
		bkt := tx.Bucket([]byte(b.name))
		return bkt.ForEach(func(k, v []byte) error {
			return f(string(k), v)
		})
	})
}
// Delete removes key from the bucket. Deleting a non-existent key is a no-op
// and returns nil.
func (b *boltBucket) Delete(key string) error {
	return b.ds.db.Update(func(tx *bolt.Tx) error {
		// Renamed from b to bkt: the original shadowed the method receiver.
		bkt := tx.Bucket([]byte(b.name))
		return bkt.Delete([]byte(key))
	})
}
// DeleteBucket removes the entire bucket from the database and then closes
// this handle. The error from the delete (if any) is returned.
func (b *boltBucket) DeleteBucket() error {
	deleteErr := b.ds.db.Update(func(tx *bolt.Tx) error {
		return tx.DeleteBucket([]byte(b.name))
	})
	// Release our reference regardless of whether the delete succeeded.
	b.Close()
	return deleteErr
}
// View runs f in a read-only transaction, passing the underlying Bolt bucket.
func (b *boltBucket) View(f func(*bolt.Bucket) error) error {
	return b.ds.db.View(func(tx *bolt.Tx) error {
		// Renamed from b to bkt: the original shadowed the method receiver.
		bkt := tx.Bucket([]byte(b.name))
		return f(bkt)
	})
}
// Update runs f in a writable transaction, passing the underlying Bolt bucket.
func (b *boltBucket) Update(f func(*bolt.Bucket) error) error {
	return b.ds.db.Update(func(tx *bolt.Tx) error {
		// Renamed from b to bkt: the original shadowed the method receiver.
		bkt := tx.Bucket([]byte(b.name))
		return f(bkt)
	})
}
// Close releases this handle's reference on the shared datastore. Close is
// idempotent: only the first call has any effect.
func (b *boltBucket) Close() error {
	// Guard against double-close: the original unconditionally dereferenced
	// b.ds, which panics with a nil pointer if Close is called twice (e.g.
	// DeleteBucket followed by a deferred Close).
	if b.ds != nil {
		b.ds.done()
		b.ds = nil
	}
	return nil
}

View File

@ -0,0 +1,6 @@
[[filtering-and-enhancing-data]]
== Filter and enhance the exported data
include::../../libbeat/docs/processors.asciidoc[]
include::../../libbeat/docs/processors-using.asciidoc[]

View File

@ -0,0 +1,7 @@
[[configuration-general-options]]
== Specify general settings
You can specify settings in the +{beatname_lc}.yml+ config file to control the
general behavior of {beatname_uc}.
include::../../libbeat/docs/generalconfig.asciidoc[]

View File

@ -0,0 +1,33 @@
[id="configuration-{beatname_lc}"]
== Specify which modules to run
To enable specific modules and metricsets, you add entries to the
`auditbeat.modules` list in the +{beatname_lc}.yml+ config file. Each entry in
the list begins with a dash (-) and is followed by settings for that module.
The following example shows a configuration that runs the `audit` module with
the `kernel` and `file` metricsets enabled:
[source,yaml]
----
auditbeat.modules:
- module: audit
metricsets: [kernel]
kernel.audit_rules: |
-w /etc/passwd -p wa -k identity
-a always,exit -F arch=b32 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access
- module: audit
metricsets: [file]
file.paths:
- /bin
- /usr/bin
- /sbin
- /usr/sbin
- /etc
----
The configuration details vary by module. See the
<<{beatname_lc}-modules,module documentation>> for more detail about
configuring the available modules and metricsets.

View File

@ -0,0 +1,77 @@
[id="configuring-howto-{beatname_lc}"]
= Configuring {beatname_uc}
[partintro]
--
Before modifying configuration settings, make sure you've completed the
<<{beatname_lc}-configuration,configuration steps>> in the Getting Started.
This section describes some common use cases for changing configuration options.
To configure {beatname_uc}, you edit the configuration file. For rpm and deb,
you'll find the configuration file at +/etc/{beatname_lc}/{beatname_lc}.yml+.
There's also a full example configuration file at
+/etc/{beatname_lc}/{beatname_lc}.reference.yml+ that shows all non-deprecated
options. For mac and win, look in the archive that you extracted.
The {beatname_uc} configuration file uses http://yaml.org/[YAML] for its syntax.
See the {libbeat}/config-file-format.html[Config File Format] section of the
_Beats Platform Reference_ for more about the structure of the config file.
The following topics describe how to configure {beatname_uc}:
* <<configuration-{beatname_lc}>>
* <<configuration-general-options>>
* <<{beatname_lc}-configuration-reloading>>
* <<configuring-internal-queue>>
* <<configuring-output>>
* <<configuration-ssl>>
* <<filtering-and-enhancing-data>>
* <<configuring-ingest-node>>
* <<configuration-path>>
* <<setup-kibana-endpoint>>
* <<configuration-dashboards>>
* <<configuration-template>>
* <<configuration-logging>>
* <<using-environ-vars>>
* <<yaml-tips>>
* <<{beatname_lc}-reference-yml>>
After changing configuration settings, you need to restart {beatname_uc} to
pick up the changes.
--
include::./auditbeat-modules-config.asciidoc[]
include::./auditbeat-general-options.asciidoc[]
include::./reload-configuration.asciidoc[]
:allplatforms:
include::../../libbeat/docs/queueconfig.asciidoc[]
include::../../libbeat/docs/outputconfig.asciidoc[]
include::../../libbeat/docs/shared-ssl-config.asciidoc[]
include::./auditbeat-filtering.asciidoc[]
include::../../libbeat/docs/shared-config-ingest.asciidoc[]
include::../../libbeat/docs/shared-path-config.asciidoc[]
include::../../libbeat/docs/shared-kibana-config.asciidoc[]
include::../../libbeat/docs/setup-config.asciidoc[]
include::../../libbeat/docs/loggingconfig.asciidoc[]
:standalone:
include::../../libbeat/docs/shared-env-vars.asciidoc[]
:standalone:
:allplatforms:
include::../../libbeat/docs/yaml.asciidoc[]
include::../../libbeat/docs/reference-yml.asciidoc[]

View File

@ -0,0 +1,29 @@
[float]
[[ulimit]]
=== {beatname_uc} fails to watch folders because too many files are open?
Because of the way file monitoring is implemented on macOS, you may see a
warning similar to the following:
[source,shell]
----
eventreader_fsnotify.go:42: WARN [audit.file] Failed to watch /usr/bin: too many
open files (check the max number of open files allowed with 'ulimit -a')
----
To resolve this issue, run {beatname_uc} with the `ulimit` set to a larger
value, for example:
["source","sh",subs="attributes"]
----
sudo sh -c 'ulimit -n 8192 && ./{beatname_lc} -e'
----
Or:
["source","sh",subs="attributes"]
----
sudo su
ulimit -n 8192
./{beatname_lc} -e
----

View File

@ -0,0 +1,12 @@
[[faq]]
== Frequently asked questions
This section contains frequently asked questions about {beatname_uc}. Also check
out the
https://discuss.elastic.co/c/beats/{beatname_lc}[{beatname_uc} discussion forum].
include::./faq-ulimit.asciidoc[]
include::../../libbeat/docs/faq-limit-bandwidth.asciidoc[]
include::../../libbeat/docs/shared-faq.asciidoc[]

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,294 @@
[id="{beatname_lc}-getting-started"]
== Getting started with {beatname_uc}
To get started with your own {beatname_uc} setup, install and configure these
related products:
* Elasticsearch for storage and indexing the data.
* Kibana for the UI.
* Logstash (optional) for inserting data into Elasticsearch.
See {libbeat}/getting-started.html[Getting Started with Beats and the Elastic Stack]
for more information.
After installing the Elastic Stack, read the following topics to learn how to
install, configure, and run {beatname_uc}:
* <<{beatname_lc}-installation>>
* <<{beatname_lc}-configuration>>
* <<{beatname_lc}-template>>
* <<load-kibana-dashboards>>
* <<{beatname_lc}-starting>>
* <<view-kibana-dashboards>>
* <<setup-repositories>>
[id="{beatname_lc}-installation"]
=== Step 1: Install {beatname_uc}
You should install {beatname_uc} on all the servers you want to monitor.
include::../../libbeat/docs/shared-download-and-install.asciidoc[]
[[deb]]
*deb:*
ifeval::["{release-state}"=="unreleased"]
Version {stack-version} of {beatname_uc} has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","sh",subs="attributes"]
------------------------------------------------
curl -L -O https://artifacts.elastic.co/downloads/beats/{beatname_lc}/{beatname_lc}-{version}-amd64.deb
sudo dpkg -i {beatname_lc}-{version}-amd64.deb
------------------------------------------------
endif::[]
[[rpm]]
*rpm:*
ifeval::["{release-state}"=="unreleased"]
Version {stack-version} of {beatname_uc} has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","sh",subs="attributes"]
------------------------------------------------
curl -L -O https://artifacts.elastic.co/downloads/beats/{beatname_lc}/{beatname_lc}-{version}-x86_64.rpm
sudo rpm -vi {beatname_lc}-{version}-x86_64.rpm
------------------------------------------------
endif::[]
[[mac]]
*mac:*
ifeval::["{release-state}"=="unreleased"]
Version {stack-version} of {beatname_uc} has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","sh",subs="attributes"]
------------------------------------------------
curl -L -O https://artifacts.elastic.co/downloads/beats/{beatname_lc}/{beatname_lc}-{version}-darwin-x86_64.tar.gz
tar xzvf {beatname_lc}-{version}-darwin-x86_64.tar.gz
------------------------------------------------
endif::[]
[[docker]]
*docker:*
ifeval::["{release-state}"=="unreleased"]
Version {stack-version} of {beatname_uc} has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source", "shell", subs="attributes"]
------------------------------------------------
docker pull {dockerimage}
------------------------------------------------
endif::[]
[[win]]
*win:*
ifeval::["{release-state}"=="unreleased"]
Version {stack-version} of {beatname_uc} has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
. Download the {beatname_uc} Windows zip file from the
https://www.elastic.co/downloads/beats/{beatname_lc}[downloads page].
. Extract the contents of the zip file into `C:\Program Files`.
. Rename the +{beatname_lc}-<version>-windows+ directory to +{beatname_uc}+.
. Open a PowerShell prompt as an Administrator (right-click the PowerShell icon
and select *Run As Administrator*). If you are running Windows XP, you may need
to download and install PowerShell.
. From the PowerShell prompt, run the following commands to install {beatname_uc}
as a Windows service:
+
["source","sh",subs="attributes"]
----------------------------------------------------------------------
PS > cd 'C:{backslash}Program Files{backslash}{beatname_uc}'
PS C:{backslash}Program Files{backslash}{beatname_uc}> .{backslash}install-service-{beatname_lc}.ps1
----------------------------------------------------------------------
NOTE: If script execution is disabled on your system, you need to set the
execution policy for the current session to allow the script to run. For
example: +PowerShell.exe -ExecutionPolicy UnRestricted -File
.\install-service-{beatname_lc}.ps1+.
endif::[]
Before starting {beatname_uc}, you should look at the configuration options in the
configuration file, for example +C:{backslash}Program Files{backslash}{beatname_uc}{backslash}{beatname_lc}.yml+.
For more information about these options, see
<<configuring-howto-{beatname_lc}>>.
[id="{beatname_lc}-configuration"]
=== Step 2: Configure {beatname_uc}
include::../../libbeat/docs/shared-configuring.asciidoc[]
To configure {beatname_uc}:
. Define the {beatname_uc} modules that you want to enable. {beatname_uc} uses
modules to collect the audit information. For each module, specify the
metricsets that you want to collect.
+
The following example shows the `file` metricset configured to generate
events whenever a file in one of the specified paths changes on disk:
+
["source","sh",subs="attributes"]
-------------------------------------
auditbeat.modules:
- module: audit
metricsets: [file]
file.paths:
- /bin
- /usr/bin
- /sbin
- /usr/sbin
- /etc
-------------------------------------
+
If you accept the default configuration without specifying additional modules,
{beatname_uc} uses a configuration that's tailored to the operating system where
{beatname_uc} is running.
+
See <<configuring-howto-{beatname_lc}>> for more details about configuring modules.
. If you are sending output to Elasticsearch (and not using Logstash), set the
IP address and port where {beatname_uc} can find the Elasticsearch installation:
+
[source,yaml]
----------------------------------------------------------------------
output.elasticsearch:
hosts: ["127.0.0.1:9200"]
----------------------------------------------------------------------
+
If you are sending output to Logstash, make sure you
<<logstash-output,Configure the Logstash output>> instead.
include::../../libbeat/docs/step-configure-kibana-endpoint.asciidoc[]
include::../../libbeat/docs/step-configure-credentials.asciidoc[]
include::../../libbeat/docs/step-test-config.asciidoc[]
include::../../libbeat/docs/step-look-at-config.asciidoc[]
[id="{beatname_lc}-template"]
=== Step 3: Load the index template in Elasticsearch
:allplatforms:
include::../../libbeat/docs/shared-template-load.asciidoc[]
[[load-kibana-dashboards]]
=== Step 4: Set up the Kibana dashboards
:allplatforms:
include::../../libbeat/docs/dashboards.asciidoc[]
[id="{beatname_lc}-starting"]
=== Step 5: Start {beatname_uc}
Run {beatname_uc} by issuing the appropriate command for your platform. If you
are accessing a secured Elasticsearch cluster, make sure you've configured
credentials as described in <<{beatname_lc}-configuration>>.
NOTE: If you use an init.d script to start {beatname_uc} on deb or rpm, you can't
specify command line flags (see <<command-line-options>>). To specify flags,
start {beatname_uc} in the foreground.
*deb:*
["source","sh",subs="attributes"]
----------------------------------------------------------------------
sudo service {beatname_lc} start
----------------------------------------------------------------------
*rpm:*
["source","sh",subs="attributes"]
----------------------------------------------------------------------
sudo service {beatname_lc} start
----------------------------------------------------------------------
*mac:*
["source","sh",subs="attributes"]
----------------------------------------------------------------------
sudo chown root {beatname_lc}.yml <1>
sudo ./{beatname_lc} -e -c {beatname_lc}.yml -d "publish"
----------------------------------------------------------------------
<1> To monitor system files, you'll be running {beatname_uc} as root, so you
need to change ownership of the configuration file, or run {beatname_uc} with
`-strict.perms=false` specified. See
{libbeat}/config-file-permissions.html[Config File Ownership and Permissions]
in the _Beats Platform Reference_.
If you see a warning about too many open files, you need to increase the
`ulimit`. See the <<ulimit,FAQ>> for more details.
*win:*
["source","sh",subs="attributes"]
----------------------------------------------------------------------
PS C:{backslash}Program Files{backslash}{beatname_uc}> Start-Service {beatname_lc}
----------------------------------------------------------------------
By default the log files are stored in +C:{backslash}ProgramData{backslash}{beatname_lc}{backslash}Logs+.
==== Test the {beatname_uc} installation
To verify that your server's statistics are present in Elasticsearch, issue
the following command:
["source","sh",subs="attributes"]
----------------------------------------------------------------------
curl -XGET 'http://localhost:9200/{beatname_lc}-*/_search?pretty'
----------------------------------------------------------------------
Make sure that you replace `localhost:9200` with the address of your
Elasticsearch instance.
On Windows, if you don't have cURL installed, simply point your browser to the
URL.
[[view-kibana-dashboards]]
=== Step 6: View the sample Kibana dashboards
To make it easier for you to start auditing the activities of users and
processes on your system, we have created example {beatname_uc} dashboards.
You loaded the dashboards earlier when you ran the `setup` command.
include::../../libbeat/docs/opendashboards.asciidoc[]
The dashboards are provided as examples. We recommend that you
{kibana-ref}/dashboard.html[customize] them to meet your needs.
image:./images/auditbeat-file-integrity-dashboard.png[Auditbeat File Integrity Dashboard]

Binary file not shown.

After

Width:  |  Height:  |  Size: 257 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 133 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 133 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 218 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 90 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 48 KiB

View File

@ -0,0 +1,42 @@
= Auditbeat Reference
include::../../libbeat/docs/version.asciidoc[]
include::{asciidoc-dir}/../../shared/attributes.asciidoc[]
:libbeat: http://www.elastic.co/guide/en/beats/libbeat/{doc-branch}
:kibana-ref: https://www.elastic.co/guide/en/kibana/{doc-branch}
:beatsdevguide: http://www.elastic.co/guide/en/beats/devguide/{doc-branch}
:filebeat: http://www.elastic.co/guide/en/beats/filebeat/{doc-branch}
:logstashdoc: https://www.elastic.co/guide/en/logstash/{doc-branch}
:elasticsearch: https://www.elastic.co/guide/en/elasticsearch/reference/{doc-branch}
:securitydoc: https://www.elastic.co/guide/en/x-pack/{doc-branch}
:monitoringdoc: https://www.elastic.co/guide/en/x-pack/{doc-branch}
:version: {stack-version}
:beatname_lc: auditbeat
:beatname_uc: Auditbeat
:beatname_pkg: {beatname_lc}
:security: X-Pack Security
:dockerimage: docker.elastic.co/beats/{beatname_lc}:{version}
include::./overview.asciidoc[]
include::../../libbeat/docs/contributing-to-beats.asciidoc[]
include::./getting-started.asciidoc[]
include::../../libbeat/docs/repositories.asciidoc[]
include::./setting-up-running.asciidoc[]
include::./configuring-howto.asciidoc[]
include::./modules.asciidoc[]
include::./fields.asciidoc[]
include::./securing-auditbeat.asciidoc[]
include::./troubleshooting.asciidoc[]
include::./faq.asciidoc[]

View File

@ -0,0 +1,15 @@
[id="{beatname_lc}-modules"]
= Modules
[partintro]
--
This section contains detailed information about the metric collecting modules
contained in {beatname_uc}. Each module contains one or multiple metricsets. More details
about each module can be found under the links below.
//pass macro block used here to remove Edit links from modules documentation because it is generated
pass::[<?edit_url?>]
include::modules_list.asciidoc[]

View File

@ -0,0 +1,75 @@
////
This file is generated! See scripts/docs_collector.py
////
[id="{beatname_lc}-module-audit"]
== Audit Module
The `audit` module reports security-relevant information based on data captured
from the operating system (OS) or services running on the OS. Although this
feature doesn't provide additional security to your system, it does make it
easier for you to discover and track security policy violations.
[float]
=== Example configuration
The Audit module supports the common configuration options that are
described under <<configuration-{beatname_lc},configuring {beatname_uc}>>. Here
is an example configuration:
[source,yaml]
----
auditbeat.modules:
- module: audit
metricsets: [kernel]
kernel.audit_rules: |
## Define audit rules here.
## Create file watches (-w) or syscall audits (-a or -A). Uncomment these
## examples or add your own rules.
## If you are on a 64 bit platform, everything should be running
## in 64 bit mode. This rule will detect any use of the 32 bit syscalls
## because this might be a sign of someone exploiting a hole in the 32
## bit API.
#-a always,exit -F arch=b32 -S all -F key=32bit-abi
## Executions.
#-a always,exit -F arch=b64 -S execve,execveat -k exec
## External access.
#-a always,exit -F arch=b64 -S accept,bind,connect,recvfrom -F key=external-access
## Identity changes.
#-w /etc/group -p wa -k identity
#-w /etc/passwd -p wa -k identity
#-w /etc/gshadow -p wa -k identity
## Unauthorized access attempts.
#-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access
#-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access
- module: audit
metricsets: [file]
file.paths:
- /bin
- /usr/bin
- /sbin
- /usr/sbin
- /etc
----
[float]
=== Metricsets
The following metricsets are available:
* <<{beatname_lc}-metricset-audit-file,file>>
* <<{beatname_lc}-metricset-audit-kernel,kernel>>
include::audit/file.asciidoc[]
include::audit/kernel.asciidoc[]

View File

@ -0,0 +1,19 @@
////
This file is generated! See scripts/docs_collector.py
////
[id="{beatname_lc}-metricset-audit-file"]
include::../../../module/audit/file/_meta/docs.asciidoc[]
==== Fields
For a description of each field in the metricset, see the
<<exported-fields-audit,exported fields>> section.
Here is an example document generated by this metricset:
[source,json]
----
include::../../../module/audit/file/_meta/data.json[]
----

View File

@ -0,0 +1,19 @@
////
This file is generated! See scripts/docs_collector.py
////
[id="{beatname_lc}-metricset-audit-kernel"]
include::../../../module/audit/kernel/_meta/docs.asciidoc[]
==== Fields
For a description of each field in the metricset, see the
<<exported-fields-audit,exported fields>> section.
Here is an example document generated by this metricset:
[source,json]
----
include::../../../module/audit/kernel/_meta/data.json[]
----

View File

@ -0,0 +1,10 @@
////
This file is generated! See scripts/docs_collector.py
////
* <<{beatname_lc}-module-audit,Audit>>
--
include::modules/audit.asciidoc[]

View File

@ -0,0 +1,13 @@
[id="{beatname_lc}-overview"]
== {beatname_uc} overview
++++
<titleabbrev>Overview</titleabbrev>
++++
{beatname_uc} is a lightweight shipper that you can install on your servers to
audit the activities of users and processes on your systems. For example, you
can use {beatname_uc} to collect and centralize audit events from the Linux
Audit Framework. You can also use {beatname_uc} to detect changes to critical
files, like binaries and configuration files, and identify potential security
policy violations.

View File

@ -0,0 +1,4 @@
This functionality is experimental and may be changed or removed completely in a
future release. Elastic will take a best effort approach to fix any issues, but
experimental features are not subject to the support SLA of official GA
features.

View File

@ -0,0 +1,51 @@
[id="{beatname_lc}-configuration-reloading"]
== Reload the configuration dynamically
beta[]
You can configure {beatname_uc} to dynamically reload configuration files when
there are changes. To do this, you specify a path
(https://golang.org/pkg/path/filepath/#Glob[Glob]) to watch for module
configuration changes. When the files found by the Glob change, new modules are
started/stopped according to changes in the configuration files.
To enable dynamic config reloading, you specify the `path` and `reload` options
in the main +{beatname_lc}.yml+ config file. For example:
["source","sh"]
------------------------------------------------------------------------------
auditbeat.config.modules:
path: ${path.config}/conf.d/*.yml
reload.enabled: true
reload.period: 10s
------------------------------------------------------------------------------
*`path`*:: A Glob that defines the files to check for changes.
*`reload.enabled`*:: When set to `true`, enables dynamic config reload.
*`reload.period`*:: Specifies how often the files are checked for changes. Do not
set the `period` to less than 1s because the modification time of files is often
stored in seconds. Setting the `period` to less than 1s will result in
unnecessary overhead.
Each file found by the Glob must contain a list of one or more module
definitions. For example:
[source,yaml]
------------------------------------------------------------------------------
auditbeat.modules:
- module: audit
metricsets: [file]
file.paths:
wordpress:
- /www/wordpress
- /www/wordpress/wp-admin
- /www/wordpress/wp-content
- /www/wordpress/wp-includes
------------------------------------------------------------------------------
NOTE: On systems with POSIX file permissions, all Beats configuration files are
subject to ownership and file permission checks. If you encounter config loading
errors related to file ownership, see {libbeat}/config-file-permissions.html.

View File

@ -0,0 +1,15 @@
include::../../libbeat/docs/shared-docker.asciidoc[]
[float]
==== Special requirements
Under Docker, {beatname_uc} runs as a non-root user, but requires some privileged
capabilities to operate correctly. Ensure that the +AUDIT_CONTROL+ and +AUDIT_READ+
capabilities are available to the container.
It is also essential to run {beatname_uc} in the host PID namespace.
["source","sh",subs="attributes"]
----
docker run --cap-add=AUDIT_CONTROL,AUDIT_READ --pid=host {dockerimage}
----

View File

@ -0,0 +1,27 @@
[id="securing-{beatname_lc}"]
= Securing {beatname_uc}
[partintro]
--
The following topics describe how to secure communication between {beatname_uc}
and other products in the Elastic stack:
* <<securing-communication-elasticsearch>>
* <<configuring-ssl-logstash>>
//sets block macro for https.asciidoc included in next section
--
[[securing-communication-elasticsearch]]
== Secure communication with Elasticsearch
include::../../libbeat/docs/https.asciidoc[]
//sets block macro for shared-ssl-logstash-config.asciidoc included in next section
[[configuring-ssl-logstash]]
== Secure communication with Logstash by using SSL
include::../../libbeat/docs/shared-ssl-logstash-config.asciidoc[]

View File

@ -0,0 +1,30 @@
/////
// NOTE:
// Each beat has its own setup overview to allow for the addition of content
// that is unique to each beat.
/////
[[seting-up-and-running]]
== Setting up and running {beatname_uc}
Before reading this section, see the
<<{beatname_lc}-getting-started,getting started documentation>> for basic
installation instructions to get you started.
This section includes additional information on how to set up and run
{beatname_uc}, including:
* <<directory-layout>>
* <<command-line-options>>
* <<running-on-docker>>
//MAINTAINERS: If you add a new file to this section, make sure you update the bulleted list ^^ too.
include::../../libbeat/docs/shared-directory-layout.asciidoc[]
include::../../libbeat/docs/command-reference.asciidoc[]
include::./running-on-docker.asciidoc[]

View File

@ -0,0 +1,30 @@
[[troubleshooting]]
= Troubleshooting
[partintro]
--
If you have issues installing or running {beatname_uc}, read the
following tips:
* <<getting-help>>
* <<enable-{beatname_lc}-debugging>>
* <<faq>>
//sets block macro for getting-help.asciidoc included in next section
--
[[getting-help]]
== Get Help
include::../../libbeat/docs/getting-help.asciidoc[]
//sets block macro for debugging.asciidoc included in next section
[id="enable-{beatname_lc}-debugging"]
== Debug
include::../../libbeat/docs/debugging.asciidoc[]

17
vendor/github.com/elastic/beats/auditbeat/main.go generated vendored Normal file
View File

@ -0,0 +1,17 @@
// Command auditbeat is the entry point for the auditbeat executable.
package main

import (
	"os"

	"github.com/elastic/beats/auditbeat/cmd"

	// Blank imports register the audit module and its metricsets via their
	// package init functions.
	_ "github.com/elastic/beats/auditbeat/module/audit"
	_ "github.com/elastic/beats/auditbeat/module/audit/file"
	_ "github.com/elastic/beats/auditbeat/module/audit/kernel"
)

// main executes the root command and exits with status 1 on error. The error
// itself is presumably reported by the command machinery — main only sets the
// exit status.
func main() {
	if err := cmd.RootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}

26
vendor/github.com/elastic/beats/auditbeat/main_test.go generated vendored Normal file
View File

@ -0,0 +1,26 @@
package main

// This file is mandatory as otherwise the auditbeat.test binary is not generated correctly.

import (
	"flag"
	"testing"

	"github.com/elastic/beats/auditbeat/cmd"
)

// systemTest is set via -systemTest to run the full beat from the test binary.
var systemTest *bool

// init registers the -systemTest flag and forwards it (plus Go's coverage
// flag) onto the root command's persistent flags so cobra accepts them.
func init() {
	systemTest = flag.Bool("systemTest", false, "Set to true when running system tests")

	cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest"))
	cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile"))
}

// Test started when the test binary is started. Only calls main.
func TestSystem(t *testing.T) {
	if *systemTest {
		main()
	}
}

View File

@ -0,0 +1,88 @@
{{ if eq .goos "linux" -}}
{{ if .reference -}}
# The kernel metricset collects events from the audit framework in the Linux
# kernel. You need to specify audit rules for the events that you want to audit.
{{ end -}}
- module: audit
metricsets: [kernel]
{{ if .reference -}}
kernel.resolve_ids: true
kernel.failure_mode: silent
kernel.backlog_limit: 8196
kernel.rate_limit: 0
kernel.include_raw_message: false
kernel.include_warnings: false
{{ end -}}
kernel.audit_rules: |
## Define audit rules here.
## Create file watches (-w) or syscall audits (-a or -A). Uncomment these
## examples or add your own rules.
## If you are on a 64 bit platform, everything should be running
## in 64 bit mode. This rule will detect any use of the 32 bit syscalls
## because this might be a sign of someone exploiting a hole in the 32
## bit API.
#-a always,exit -F arch=b32 -S all -F key=32bit-abi
## Executions.
#-a always,exit -F arch=b64 -S execve,execveat -k exec
## External access.
#-a always,exit -F arch=b64 -S accept,bind,connect,recvfrom -F key=external-access
## Identity changes.
#-w /etc/group -p wa -k identity
#-w /etc/passwd -p wa -k identity
#-w /etc/gshadow -p wa -k identity
## Unauthorized access attempts.
#-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access
#-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access
{{ end -}}
{{ if .reference -}}
# The file integrity metricset sends events when files are changed (created,
# updated, deleted). The events contain file metadata and hashes.
{{ end -}}
- module: audit
metricsets: [file]
{{ if eq .goos "darwin" -}}
file.paths:
- /bin
- /usr/bin
- /usr/local/bin
- /sbin
- /usr/sbin
- /usr/local/sbin
{{ else if eq .goos "windows" -}}
file.paths:
- C:/windows
- C:/windows/system32
- C:/Program Files
- C:/Program Files (x86)
{{ else -}}
file.paths:
- /bin
- /usr/bin
- /sbin
- /usr/sbin
- /etc
{{ end -}}
{{ if .reference }}
# Scan over the configured file paths at startup and send events for new or
# modified files since the last time Auditbeat was running.
file.scan_at_start: true
# Average scan rate. This throttles the amount of CPU and I/O that Auditbeat
# consumes at startup while scanning. Default is "50 MiB".
file.scan_rate_per_sec: 50 MiB
# Limit on the size of files that will be hashed. Default is "100 MiB".
file.max_file_size: 100 MiB
# Hash types to compute when the file changes. Supported types are md5, sha1,
# sha224, sha256, sha384, sha512, sha512_224, sha512_256, sha3_224, sha3_256,
# sha3_384 and sha3_512. Default is sha1.
file.hash_types: [sha1]
{{- end }}

View File

@ -0,0 +1,6 @@
== Audit Module
The `audit` module reports security-relevant information based on data captured
from the operating system (OS) or services running on the OS. Although this
feature doesn't provide additional security to your system, it does make it
easier for you to discover and track security policy violations.

View File

@ -0,0 +1,11 @@
- key: audit
title: Audit
short_config: true
description: >
The `audit` module reports security-relevant information based on data
captured from the operating system (OS) or services running on the OS.
fields:
- name: audit
type: group
description: >
fields:

View File

@ -0,0 +1,13 @@
{
"hits": 0,
"timeRestore": false,
"description": "",
"title": "Auditbeat - File Integrity",
"uiStateJSON": "{\"P-1\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}},\"P-6\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}},\"P-7\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}},\"P-8\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}},\"P-9\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}}",
"panelsJSON": "[{\"col\":1,\"id\":\"AV0tVcg6g1PYniApZa-v\",\"panelIndex\":1,\"row\":1,\"size_x\":2,\"size_y\":6,\"type\":\"visualization\"},{\"col\":3,\"id\":\"AV0tV05vg1PYniApZbA2\",\"panelIndex\":2,\"row\":1,\"size_x\":7,\"size_y\":6,\"type\":\"visualization\"},{\"col\":10,\"id\":\"AV0tWL-Yg1PYniApZbCs\",\"panelIndex\":3,\"row\":1,\"size_x\":3,\"size_y\":3,\"type\":\"visualization\"},{\"col\":10,\"id\":\"AV0tWSdXg1PYniApZbDU\",\"panelIndex\":4,\"row\":4,\"size_x\":3,\"size_y\":3,\"type\":\"visualization\"},{\"col\":5,\"id\":\"AV0tW0djg1PYniApZbGL\",\"panelIndex\":5,\"row\":9,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"AV0tY6jwg1PYniApZbRY\",\"panelIndex\":6,\"row\":7,\"size_x\":4,\"size_y\":2,\"type\":\"visualization\"},{\"col\":5,\"id\":\"AV0tav8Ag1PYniApZbbK\",\"panelIndex\":7,\"row\":7,\"size_x\":4,\"size_y\":2,\"type\":\"visualization\"},{\"col\":9,\"id\":\"AV0tbcUdg1PYniApZbe1\",\"panelIndex\":8,\"row\":7,\"size_x\":4,\"size_y\":2,\"type\":\"visualization\"},{\"size_x\":12,\"size_y\":5,\"panelIndex\":9,\"type\":\"visualization\",\"id\":\"AV0tc_xZg1PYniApZbnL\",\"col\":1,\"row\":12},{\"size_x\":4,\"size_y\":3,\"panelIndex\":10,\"type\":\"visualization\",\"id\":\"AV0tes4Eg1PYniApZbwV\",\"col\":9,\"row\":9},{\"size_x\":4,\"size_y\":3,\"panelIndex\":11,\"type\":\"visualization\",\"id\":\"AV0te0TCg1PYniApZbw9\",\"col\":1,\"row\":9}]",
"optionsJSON": "{\"darkTheme\":false}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}],\"highlightAll\":true,\"version\":true}"
}
}

View File

@ -0,0 +1,10 @@
{
"visState": "{\"title\":\"Auditbeat - File - Events over time\",\"type\":\"histogram\",\"params\":{\"grid\":{\"categoryLines\":false,\"style\":{\"color\":\"#eee\"}},\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"type\":\"category\",\"position\":\"bottom\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\"},\"labels\":{\"show\":true,\"truncate\":100},\"title\":{\"text\":\"@timestamp per 5 minutes\"}}],\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"name\":\"LeftAxis-1\",\"type\":\"value\",\"position\":\"left\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\",\"mode\":\"normal\"},\"labels\":{\"show\":true,\"rotate\":0,\"filter\":false,\"truncate\":100},\"title\":{\"text\":\"Count\"}}],\"seriesParams\":[{\"show\":\"true\",\"type\":\"histogram\",\"mode\":\"stacked\",\"data\":{\"label\":\"Count\",\"id\":\"1\"},\"valueAxis\":\"ValueAxis-1\",\"drawLinesBetweenPoints\":true,\"showCircles\":true}],\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"times\":[],\"addTimeMarker\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"audit.file.action\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Action\"}}],\"listeners\":{}}",
"description": "",
"title": "Auditbeat - File - Events over time",
"uiStateJSON": "{}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}"
}
}

View File

@ -0,0 +1,10 @@
{
"visState": "{\"title\":\"Auditbeat - File - Action Metrics\",\"type\":\"metric\",\"params\":{\"addLegend\":false,\"addTooltip\":true,\"gauge\":{\"autoExtend\":false,\"backStyle\":\"Full\",\"colorSchema\":\"Green to Red\",\"colorsRange\":[{\"from\":0,\"to\":100}],\"gaugeColorMode\":\"None\",\"gaugeStyle\":\"Full\",\"gaugeType\":\"Metric\",\"invertColors\":false,\"labels\":{\"color\":\"black\",\"show\":true},\"orientation\":\"vertical\",\"percentageMode\":false,\"scale\":{\"color\":\"#333\",\"labels\":false,\"show\":true,\"width\":2},\"style\":{\"bgColor\":false,\"bgFill\":\"#000\",\"fontSize\":\"24\",\"labelColor\":false,\"subText\":\"\"},\"type\":\"simple\",\"useRange\":false,\"verticalSplit\":true,\"extendRange\":false},\"type\":\"gauge\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"Actions\"}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"audit.file.action\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}",
"description": "",
"title": "Auditbeat - File - Action Metrics",
"uiStateJSON": "{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}"
}
}

View File

@ -0,0 +1,10 @@
{
"visState": "{\"title\":\"Auditbeat - File - Top updated\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"audit.file.path\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Path\"}}],\"listeners\":{}}",
"description": "",
"title": "Auditbeat - File - Top updated",
"uiStateJSON": "{}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query_string\":{\"query\":\"audit.file.action:updated OR audit.file.action:attributes_modified\",\"analyze_wildcard\":true}},\"filter\":[]}"
}
}

View File

@ -0,0 +1,10 @@
{
"visState": "{\n \"title\": \"Auditbeat - File - Top owners\",\n \"type\": \"pie\",\n \"params\": {\n \"addTooltip\": true,\n \"addLegend\": true,\n \"legendPosition\": \"right\",\n \"isDonut\": true\n },\n \"aggs\": [\n {\n \"id\": \"1\",\n \"enabled\": true,\n \"type\": \"count\",\n \"schema\": \"metric\",\n \"params\": {}\n },\n {\n \"id\": \"2\",\n \"enabled\": true,\n \"type\": \"terms\",\n \"schema\": \"segment\",\n \"params\": {\n \"field\": \"audit.file.owner\",\n \"size\": 5,\n \"order\": \"desc\",\n \"orderBy\": \"1\",\n \"customLabel\": \"Owner\"\n }\n }\n ],\n \"listeners\": {}\n}",
"description": "",
"title": "Auditbeat - File - Top owners",
"uiStateJSON": "{}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\n \"index\": \"auditbeat-*\",\n \"query\": {\n \"query_string\": {\n \"query\": \"*\",\n \"analyze_wildcard\": true\n }\n },\n \"filter\": []\n}"
}
}

View File

@ -0,0 +1,10 @@
{
"visState": "{\n \"title\": \"Auditbeat - File - Top groups\",\n \"type\": \"pie\",\n \"params\": {\n \"addTooltip\": true,\n \"addLegend\": true,\n \"legendPosition\": \"right\",\n \"isDonut\": true\n },\n \"aggs\": [\n {\n \"id\": \"1\",\n \"enabled\": true,\n \"type\": \"count\",\n \"schema\": \"metric\",\n \"params\": {}\n },\n {\n \"id\": \"2\",\n \"enabled\": true,\n \"type\": \"terms\",\n \"schema\": \"segment\",\n \"params\": {\n \"field\": \"audit.file.group\",\n \"size\": 5,\n \"order\": \"desc\",\n \"orderBy\": \"1\",\n \"customLabel\": \"Group\"\n }\n }\n ],\n \"listeners\": {}\n}",
"description": "",
"title": "Auditbeat - File - Top groups",
"uiStateJSON": "{}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\n \"index\": \"auditbeat-*\",\n \"query\": {\n \"query_string\": {\n \"query\": \"*\",\n \"analyze_wildcard\": true\n }\n },\n \"filter\": []\n}"
}
}

View File

@ -0,0 +1,10 @@
{
"visState": "{\"title\":\"Auditbeat - File - Top agent by count\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"gauge\",\"gauge\":{\"verticalSplit\":false,\"autoExtend\":false,\"percentageMode\":false,\"gaugeType\":\"Metric\",\"gaugeStyle\":\"Full\",\"backStyle\":\"Full\",\"orientation\":\"vertical\",\"colorSchema\":\"Green to Red\",\"gaugeColorMode\":\"None\",\"useRange\":false,\"colorsRange\":[{\"from\":0,\"to\":100}],\"invertColors\":false,\"labels\":{\"show\":true,\"color\":\"black\"},\"scale\":{\"show\":false,\"labels\":false,\"color\":\"#333\",\"width\":2},\"type\":\"simple\",\"style\":{\"fontSize\":\"23\",\"bgFill\":\"#000\",\"bgColor\":false,\"labelColor\":false,\"subText\":\"\"}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"Top agent by count\"}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"beat.hostname\",\"size\":1,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}",
"description": "",
"title": "Auditbeat - File - Top agent by count",
"uiStateJSON": "{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query_string\":{\"query\":\"_exists_:audit.file\",\"analyze_wildcard\":true}},\"filter\":[]}"
}
}

View File

@ -0,0 +1,10 @@
{
"visState": "{\"title\":\"Auditbeat - File - Most changed file by count\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"gauge\",\"gauge\":{\"verticalSplit\":false,\"autoExtend\":false,\"percentageMode\":false,\"gaugeType\":\"Metric\",\"gaugeStyle\":\"Full\",\"backStyle\":\"Full\",\"orientation\":\"vertical\",\"colorSchema\":\"Green to Red\",\"gaugeColorMode\":\"None\",\"useRange\":false,\"colorsRange\":[{\"from\":0,\"to\":100}],\"invertColors\":false,\"labels\":{\"show\":true,\"color\":\"black\"},\"scale\":{\"show\":false,\"labels\":false,\"color\":\"#333\",\"width\":2},\"type\":\"simple\",\"style\":{\"fontSize\":\"20\",\"bgFill\":\"#000\",\"bgColor\":false,\"labelColor\":false,\"subText\":\"\"}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"Most changed file by count\"}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"audit.file.path\",\"size\":1,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}",
"description": "",
"title": "Auditbeat - File - Most changed file by count",
"uiStateJSON": "{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query_string\":{\"query\":\"audit.file.type:file\",\"analyze_wildcard\":true}},\"filter\":[]}"
}
}

View File

@ -0,0 +1,10 @@
{
"visState": "{\"title\":\"Auditbeat - File - Most common mode by count\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"gauge\",\"gauge\":{\"verticalSplit\":false,\"autoExtend\":false,\"percentageMode\":false,\"gaugeType\":\"Metric\",\"gaugeStyle\":\"Full\",\"backStyle\":\"Full\",\"orientation\":\"vertical\",\"colorSchema\":\"Green to Red\",\"gaugeColorMode\":\"None\",\"useRange\":false,\"colorsRange\":[{\"from\":0,\"to\":100}],\"invertColors\":false,\"labels\":{\"show\":true,\"color\":\"black\"},\"scale\":{\"show\":false,\"labels\":false,\"color\":\"#333\",\"width\":2},\"type\":\"simple\",\"style\":{\"fontSize\":\"20\",\"bgFill\":\"#000\",\"bgColor\":false,\"labelColor\":false,\"subText\":\"\"}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"Most common mode by count\"}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"audit.file.mode\",\"size\":1,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}",
"description": "",
"title": "Auditbeat - File - Most common mode by count",
"uiStateJSON": "{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}"
}
}

View File

@ -0,0 +1,10 @@
{
"visState": "{\"title\":\"Auditbeat - File - Event summary\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"beat.hostname\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Hostname\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"audit.file.path\",\"size\":20,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Path\"}},{\"id\":\"4\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"audit.file.action\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Action\"}}],\"listeners\":{}}",
"description": "",
"title": "Auditbeat - File - Event summary",
"uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}"
}
}

View File

@ -0,0 +1,10 @@
{
"visState": "{\"title\":\"Auditbeat - File - Top created\",\"type\":\"pie\",\"params\":{\"addLegend\":true,\"addTooltip\":true,\"isDonut\":false,\"legendPosition\":\"right\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"audit.file.path\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Path\"}}],\"listeners\":{}}",
"description": "",
"title": "Auditbeat - File - Top created",
"uiStateJSON": "{}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query_string\":{\"query\":\"audit.file.action:created\",\"analyze_wildcard\":true}},\"filter\":[]}"
}
}

View File

@ -0,0 +1,10 @@
{
"visState": "{\"title\":\"Auditbeat - File - Top deleted\",\"type\":\"pie\",\"params\":{\"addLegend\":true,\"addTooltip\":true,\"isDonut\":false,\"legendPosition\":\"right\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"audit.file.path\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Path\"}}],\"listeners\":{}}",
"description": "",
"title": "Auditbeat - File - Top deleted",
"uiStateJSON": "{}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"audit.file.action:deleted\"}},\"filter\":[]}"
}
}

View File

@ -0,0 +1,210 @@
{
"objects": [
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true,\"default_field\":\"*\"}},\"language\":\"lucene\"},\"filter\":[]}"
},
"title": "Actions [Auditbeat File Integrity]",
"uiStateJSON": "{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}",
"version": 1,
"visState": "{\"title\":\"Actions [Auditbeat File Integrity]\",\"type\":\"metric\",\"params\":{\"addLegend\":false,\"addTooltip\":true,\"gauge\":{\"autoExtend\":false,\"backStyle\":\"Full\",\"colorSchema\":\"Green to Red\",\"colorsRange\":[{\"from\":0,\"to\":100}],\"gaugeColorMode\":\"None\",\"gaugeStyle\":\"Full\",\"gaugeType\":\"Metric\",\"invertColors\":false,\"labels\":{\"color\":\"black\",\"show\":true},\"orientation\":\"vertical\",\"percentageMode\":false,\"scale\":{\"color\":\"#333\",\"labels\":false,\"show\":true,\"width\":2},\"style\":{\"bgColor\":false,\"bgFill\":\"#000\",\"fontSize\":\"24\",\"labelColor\":false,\"subText\":\"\"},\"type\":\"simple\",\"useRange\":false,\"verticalSplit\":true,\"extendRange\":false},\"type\":\"gauge\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"\"}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"audit.file.action\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Action\"}}]}"
},
"id": "AV0tVcg6g1PYniApZa-v",
"type": "visualization",
"version": 3
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true,\"default_field\":\"*\"}},\"language\":\"lucene\"},\"filter\":[]}"
},
"title": "Events Over Time [Auditbeat File Integrity]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"title\":\"Events Over Time [Auditbeat File Integrity]\",\"type\":\"histogram\",\"params\":{\"grid\":{\"categoryLines\":false,\"style\":{\"color\":\"#eee\"}},\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"type\":\"category\",\"position\":\"bottom\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\"},\"labels\":{\"show\":true,\"truncate\":100},\"title\":{\"text\":\"@timestamp per 5 minutes\"}}],\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"name\":\"LeftAxis-1\",\"type\":\"value\",\"position\":\"left\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\",\"mode\":\"normal\",\"defaultYExtents\":true},\"labels\":{\"show\":true,\"rotate\":0,\"filter\":false,\"truncate\":100},\"title\":{\"text\":\"Count\"}}],\"seriesParams\":[{\"show\":\"true\",\"type\":\"histogram\",\"mode\":\"stacked\",\"data\":{\"label\":\"Count\",\"id\":\"1\"},\"valueAxis\":\"ValueAxis-1\",\"drawLinesBetweenPoints\":true,\"showCircles\":true}],\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"left\",\"times\":[],\"addTimeMarker\":false,\"type\":\"histogram\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"audit.file.action\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Action\"}}]}"
},
"id": "AV0tV05vg1PYniApZbA2",
"type": "visualization",
"version": 4
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true,\"default_field\":\"*\"}},\"language\":\"lucene\"},\"filter\":[]}"
},
"title": "Top owners [Auditbeat File Integrity]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"title\":\"Top owners [Auditbeat File Integrity]\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":true,\"type\":\"pie\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"audit.file.owner\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Owner\"}}]}"
},
"id": "AV0tWL-Yg1PYniApZbCs",
"type": "visualization",
"version": 2
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true,\"default_field\":\"*\"}},\"language\":\"lucene\"},\"filter\":[]}"
},
"title": "Top groups [Auditbeat File Integrity]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"title\":\"Top groups [Auditbeat File Integrity]\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":true,\"type\":\"pie\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"audit.file.group\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Group\"}}]}"
},
"id": "AV0tWSdXg1PYniApZbDU",
"type": "visualization",
"version": 2
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query\":{\"query_string\":{\"query\":\"audit.file.action:updated OR audit.file.action:attributes_modified\",\"analyze_wildcard\":true,\"default_field\":\"*\"}},\"language\":\"lucene\"},\"filter\":[]}"
},
"title": "Top updated [Auditbeat File Integrity]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"title\":\"Top updated [Auditbeat File Integrity]\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false,\"type\":\"pie\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"audit.file.path.raw\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Path\"}}]}"
},
"id": "AV0tW0djg1PYniApZbGL",
"type": "visualization",
"version": 3
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query\":\"audit.file.mode:/0..[2367]/\",\"language\":\"lucene\"},\"filter\":[]}"
},
"title": "World Writable File Count [Auditbeat File Integrity]",
"uiStateJSON": "{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}",
"version": 1,
"visState": "{\"title\":\"World Writable File Count [Auditbeat File Integrity]\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"gauge\",\"gauge\":{\"verticalSplit\":false,\"autoExtend\":false,\"percentageMode\":false,\"gaugeType\":\"Metric\",\"gaugeStyle\":\"Full\",\"backStyle\":\"Full\",\"orientation\":\"vertical\",\"colorSchema\":\"Green to Red\",\"gaugeColorMode\":\"None\",\"useRange\":false,\"colorsRange\":[{\"from\":0,\"to\":100}],\"invertColors\":false,\"labels\":{\"show\":false,\"color\":\"black\"},\"scale\":{\"show\":false,\"labels\":false,\"color\":\"#333\",\"width\":2},\"type\":\"simple\",\"style\":{\"fontSize\":\"23\",\"bgFill\":\"#000\",\"bgColor\":false,\"labelColor\":false,\"subText\":\"\"}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"cardinality\",\"schema\":\"metric\",\"params\":{\"field\":\"audit.file.inode\",\"customLabel\":\"World Writable Files\"}}]}"
},
"id": "AV0tY6jwg1PYniApZbRY",
"type": "visualization",
"version": 3
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query\":\"*\",\"language\":\"lucene\"},\"filter\":[{\"meta\":{\"index\":\"auditbeat-*\",\"negate\":false,\"disabled\":false,\"alias\":null,\"type\":\"phrase\",\"key\":\"audit.file.type\",\"value\":\"file\",\"params\":{\"query\":\"file\",\"type\":\"phrase\"}},\"query\":{\"match\":{\"audit.file.type\":{\"query\":\"file\",\"type\":\"phrase\"}}},\"$state\":{\"store\":\"appState\"}}]}"
},
"title": "Most changed file by count [Auditbeat File Integrity]",
"uiStateJSON": "{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}",
"version": 1,
"visState": "{\"title\":\"Most changed file by count [Auditbeat File Integrity]\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"gauge\",\"gauge\":{\"verticalSplit\":false,\"autoExtend\":false,\"percentageMode\":false,\"gaugeType\":\"Metric\",\"gaugeStyle\":\"Full\",\"backStyle\":\"Full\",\"orientation\":\"vertical\",\"colorSchema\":\"Green to Red\",\"gaugeColorMode\":\"None\",\"useRange\":false,\"colorsRange\":[{\"from\":0,\"to\":100}],\"invertColors\":false,\"labels\":{\"show\":true,\"color\":\"black\"},\"scale\":{\"show\":false,\"labels\":false,\"color\":\"#333\",\"width\":2},\"type\":\"simple\",\"style\":{\"fontSize\":\"20\",\"bgFill\":\"#000\",\"bgColor\":false,\"labelColor\":false,\"subText\":\"\"}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"Most changed file by count\"}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"audit.file.path.raw\",\"size\":1,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"File\"}}]}"
},
"id": "AV0tav8Ag1PYniApZbbK",
"type": "visualization",
"version": 5
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true,\"default_field\":\"*\"}},\"language\":\"lucene\"},\"filter\":[]}"
},
"title": "Most common mode by count [Auditbeat File Integrity]",
"uiStateJSON": "{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}",
"version": 1,
"visState": "{\"title\":\"Most common mode by count [Auditbeat File Integrity]\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"gauge\",\"gauge\":{\"verticalSplit\":false,\"autoExtend\":false,\"percentageMode\":false,\"gaugeType\":\"Metric\",\"gaugeStyle\":\"Full\",\"backStyle\":\"Full\",\"orientation\":\"vertical\",\"colorSchema\":\"Green to Red\",\"gaugeColorMode\":\"None\",\"useRange\":false,\"colorsRange\":[{\"from\":0,\"to\":100}],\"invertColors\":false,\"labels\":{\"show\":true,\"color\":\"black\"},\"scale\":{\"show\":false,\"labels\":false,\"color\":\"#333\",\"width\":2},\"type\":\"simple\",\"style\":{\"fontSize\":\"20\",\"bgFill\":\"#000\",\"bgColor\":false,\"labelColor\":false,\"subText\":\"\"}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"Most common mode by count\"}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"audit.file.mode\",\"size\":1,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Mode\"}}]}"
},
"id": "AV0tbcUdg1PYniApZbe1",
"type": "visualization",
"version": 3
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true,\"default_field\":\"*\"}},\"language\":\"lucene\"},\"filter\":[]}"
},
"title": "File Event Summary By Host [Auditbeat File Integrity]",
"uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}",
"version": 1,
"visState": "{\"title\":\"File Event Summary By Host [Auditbeat File Integrity]\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":true,\"totalFunc\":\"sum\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"Total Events\"}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"beat.name\",\"size\":50,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Host\"}},{\"id\":\"5\",\"enabled\":true,\"type\":\"top_hits\",\"schema\":\"metric\",\"params\":{\"field\":\"@timestamp\",\"aggregate\":\"concat\",\"size\":1,\"sortField\":\"@timestamp\",\"sortOrder\":\"desc\",\"customLabel\":\"Last Report\"}}]}"
},
"id": "AV0tc_xZg1PYniApZbnL",
"type": "visualization",
"version": 4
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"audit.file.action:deleted\",\"default_field\":\"*\"}},\"language\":\"lucene\"},\"filter\":[]}"
},
"title": "Top deleted [Auditbeat File Integrity]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"title\":\"Top deleted [Auditbeat File Integrity]\",\"type\":\"pie\",\"params\":{\"addLegend\":true,\"addTooltip\":true,\"isDonut\":false,\"legendPosition\":\"right\",\"type\":\"pie\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"audit.file.path.raw\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Path\"}}]}"
},
"id": "AV0tes4Eg1PYniApZbwV",
"type": "visualization",
"version": 3
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"query\":{\"query\":{\"query_string\":{\"query\":\"audit.file.action:created\",\"analyze_wildcard\":true,\"default_field\":\"*\"}},\"language\":\"lucene\"},\"filter\":[]}"
},
"title": "Top created [Auditbeat File Integrity]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"title\":\"Top created [Auditbeat File Integrity]\",\"type\":\"pie\",\"params\":{\"addLegend\":true,\"addTooltip\":true,\"isDonut\":false,\"legendPosition\":\"right\",\"type\":\"pie\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"audit.file.path.raw\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Path\"}}]}"
},
"id": "AV0te0TCg1PYniApZbw9",
"type": "visualization",
"version": 3
},
{
"attributes": {
"columns": [
"audit.file.path",
"audit.file.action"
],
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"highlightAll\":true,\"version\":true,\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[]}"
},
"sort": [
"@timestamp",
"desc"
],
"title": "File Integrity Events [Auditbeat File Integrity]",
"version": 1
},
"id": "a380a060-cb44-11e7-9835-2f31fe08873b",
"type": "search",
"version": 1
},
{
"attributes": {
"description": "Monitor file integrity events.",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"highlightAll\":true,\"version\":true,\"query\":{\"language\":\"lucene\",\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"default_field\":\"*\",\"query\":\"*\"}}}}"
},
"optionsJSON": "{\"darkTheme\":false}",
"panelsJSON": "[{\"col\":1,\"id\":\"AV0tVcg6g1PYniApZa-v\",\"panelIndex\":1,\"row\":1,\"size_x\":2,\"size_y\":6,\"type\":\"visualization\"},{\"col\":3,\"id\":\"AV0tV05vg1PYniApZbA2\",\"panelIndex\":2,\"row\":1,\"size_x\":7,\"size_y\":6,\"type\":\"visualization\"},{\"col\":10,\"id\":\"AV0tWL-Yg1PYniApZbCs\",\"panelIndex\":3,\"row\":1,\"size_x\":3,\"size_y\":3,\"type\":\"visualization\"},{\"col\":10,\"id\":\"AV0tWSdXg1PYniApZbDU\",\"panelIndex\":4,\"row\":4,\"size_x\":3,\"size_y\":3,\"type\":\"visualization\"},{\"col\":5,\"id\":\"AV0tW0djg1PYniApZbGL\",\"panelIndex\":5,\"row\":9,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"AV0tY6jwg1PYniApZbRY\",\"panelIndex\":6,\"row\":7,\"size_x\":4,\"size_y\":2,\"type\":\"visualization\"},{\"col\":5,\"id\":\"AV0tav8Ag1PYniApZbbK\",\"panelIndex\":7,\"row\":7,\"size_x\":4,\"size_y\":2,\"type\":\"visualization\"},{\"col\":9,\"id\":\"AV0tbcUdg1PYniApZbe1\",\"panelIndex\":8,\"row\":7,\"size_x\":4,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"AV0tc_xZg1PYniApZbnL\",\"panelIndex\":9,\"row\":12,\"size_x\":6,\"size_y\":5,\"type\":\"visualization\"},{\"col\":9,\"id\":\"AV0tes4Eg1PYniApZbwV\",\"panelIndex\":10,\"row\":9,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"AV0te0TCg1PYniApZbw9\",\"panelIndex\":11,\"row\":9,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"columns\":[\"audit.file.path\",\"audit.file.action\"],\"id\":\"a380a060-cb44-11e7-9835-2f31fe08873b\",\"panelIndex\":12,\"row\":12,\"size_x\":6,\"size_y\":5,\"sort\":[\"@timestamp\",\"desc\"],\"type\":\"search\"}]",
"timeRestore": false,
"title": "[Auditbeat File Integrity] Overview",
"uiStateJSON": "{\"P-1\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}},\"P-6\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}},\"P-7\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}},\"P-8\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}},\"P-9\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}}",
"version": 1
},
"id": "AV0tXkjYg1PYniApZbKP",
"type": "dashboard",
"version": 5
}
],
"version": "6.0.0"
}

View File

@ -0,0 +1,95 @@
{
"objects": [
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"
},
"savedSearchId": "d382f5b0-c1c6-11e7-8995-936807a28b16",
"title": "Error Codes [Auditbeat Kernel Executions]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"title\":\"Error Codes [Auditbeat Kernel Executions]\",\"type\":\"pie\",\"params\":{\"type\":\"pie\",\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":true},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"audit.kernel.data.exit\",\"exclude\":\"0\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\"}}]}"
},
"id": "20a8e8d0-c1c8-11e7-8995-936807a28b16",
"type": "visualization",
"version": 1
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"
},
"title": "Primary Username Tag Cloud [Auditbeat Kernel]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"title\":\"Primary Username Tag Cloud [Auditbeat Kernel]\",\"type\":\"tagcloud\",\"params\":{\"scale\":\"linear\",\"orientation\":\"single\",\"minFontSize\":18,\"maxFontSize\":45},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"audit.kernel.actor.primary\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\"}}]}"
},
"id": "f81a6de0-c1c1-11e7-8995-936807a28b16",
"type": "visualization",
"version": 1
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"
},
"savedSearchId": "d382f5b0-c1c6-11e7-8995-936807a28b16",
"title": "Exe Name Tag Cloud [Auditbeat Kernel]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"title\":\"Exe Name Tag Cloud [Auditbeat Kernel]\",\"type\":\"tagcloud\",\"params\":{\"scale\":\"linear\",\"orientation\":\"single\",\"minFontSize\":14,\"maxFontSize\":45},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"audit.kernel.data.exe\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\"}}]}"
},
"id": "2efac370-c1ca-11e7-8995-936807a28b16",
"type": "visualization",
"version": 1
},
{
"attributes": {
"columns": [
"beat.hostname",
"audit.kernel.data.cmdline",
"audit.kernel.actor.primary",
"audit.kernel.actor.secondary",
"audit.kernel.data.exe"
],
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"highlightAll\":true,\"version\":true,\"query\":{\"language\":\"lucene\",\"query\":\"*\"},\"filter\":[{\"$state\":{\"store\":\"appState\"},\"meta\":{\"alias\":null,\"disabled\":false,\"index\":\"auditbeat-*\",\"key\":\"metricset.name\",\"negate\":false,\"params\":{\"query\":\"kernel\",\"type\":\"phrase\"},\"type\":\"phrase\",\"value\":\"kernel\"},\"query\":{\"match\":{\"metricset.name\":{\"query\":\"kernel\",\"type\":\"phrase\"}}}},{\"$state\":{\"store\":\"appState\"},\"meta\":{\"alias\":null,\"disabled\":false,\"index\":\"auditbeat-*\",\"key\":\"audit.kernel.action\",\"negate\":false,\"params\":{\"query\":\"executed\",\"type\":\"phrase\"},\"type\":\"phrase\",\"value\":\"executed\"},\"query\":{\"match\":{\"audit.kernel.action\":{\"query\":\"executed\",\"type\":\"phrase\"}}}}]}"
},
"sort": [
"@timestamp",
"desc"
],
"title": "Process Executions [Auditbeat Kernel]",
"version": 1
},
"id": "d382f5b0-c1c6-11e7-8995-936807a28b16",
"type": "search",
"version": 1
},
{
"attributes": {
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[],\"highlightAll\":true,\"version\":true}"
},
"optionsJSON": "{\"darkTheme\":false}",
"panelsJSON": "[{\"col\":5,\"id\":\"20a8e8d0-c1c8-11e7-8995-936807a28b16\",\"panelIndex\":1,\"row\":1,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":9,\"id\":\"f81a6de0-c1c1-11e7-8995-936807a28b16\",\"panelIndex\":3,\"row\":1,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"2efac370-c1ca-11e7-8995-936807a28b16\",\"panelIndex\":5,\"row\":1,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"size_x\":12,\"size_y\":5,\"panelIndex\":6,\"type\":\"search\",\"id\":\"d382f5b0-c1c6-11e7-8995-936807a28b16\",\"col\":1,\"row\":4,\"columns\":[\"beat.hostname\",\"audit.kernel.data.cmdline\",\"audit.kernel.actor.primary\",\"audit.kernel.actor.secondary\",\"audit.kernel.data.exe\"],\"sort\":[\"@timestamp\",\"desc\"]}]",
"timeRestore": false,
"title": "[Auditbeat Kernel] Executions",
"uiStateJSON": "{}",
"version": 1
},
"id": "7de391b0-c1ca-11e7-8995-936807a28b16",
"type": "dashboard",
"version": 1
}
],
"version": "6.0.0"
}

View File

@ -0,0 +1,82 @@
{
"objects": [
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{}"
},
"title": "Event Actions [Auditbeat Kernel Overview]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"title\":\"Event Actions [Auditbeat Kernel Overview]\",\"type\":\"metrics\",\"params\":{\"id\":\"61ca57f0-469d-11e7-af02-69e470af7417\",\"type\":\"timeseries\",\"series\":[{\"id\":\"61ca57f1-469d-11e7-af02-69e470af7417\",\"color\":\"#68BC00\",\"split_mode\":\"terms\",\"metrics\":[{\"id\":\"6b9fb2d0-c1bc-11e7-938f-ab0645b6c431\",\"type\":\"count\"}],\"seperate_axis\":0,\"axis_position\":\"right\",\"formatter\":\"number\",\"chart_type\":\"line\",\"line_width\":1,\"point_size\":1,\"fill\":0.5,\"stacked\":\"none\",\"terms_field\":\"audit.kernel.action\",\"label\":\"Actions\"}],\"time_field\":\"@timestamp\",\"index_pattern\":\"auditbeat-*\",\"interval\":\"auto\",\"axis_position\":\"left\",\"axis_formatter\":\"number\",\"show_legend\":1,\"show_grid\":1,\"filter\":\"metricset.name:kernel\",\"background_color_rules\":[{\"id\":\"58c95a20-c1bd-11e7-938f-ab0645b6c431\"}],\"bar_color_rules\":[{\"id\":\"5bfc71a0-c1bd-11e7-938f-ab0645b6c431\"}],\"gauge_color_rules\":[{\"id\":\"5d20a650-c1bd-11e7-938f-ab0645b6c431\"}],\"gauge_width\":10,\"gauge_inner_width\":10,\"gauge_style\":\"half\",\"legend_position\":\"left\"},\"aggs\":[]}"
},
"id": "97680df0-c1c0-11e7-8995-936807a28b16",
"type": "visualization",
"version": 1
},
{
"attributes": {
"columns": [
"beat.hostname",
"audit.kernel.actor.primary",
"audit.kernel.actor.secondary",
"audit.kernel.action",
"audit.kernel.thing.what",
"audit.kernel.thing.primary",
"audit.kernel.thing.secondary",
"audit.kernel.how",
"audit.kernel.result"
],
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"highlightAll\":true,\"version\":true,\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"filter\":[{\"meta\":{\"negate\":false,\"index\":\"auditbeat-*\",\"type\":\"phrase\",\"key\":\"metricset.name\",\"value\":\"kernel\",\"params\":{\"query\":\"kernel\",\"type\":\"phrase\"},\"disabled\":false,\"alias\":null},\"query\":{\"match\":{\"metricset.name\":{\"query\":\"kernel\",\"type\":\"phrase\"}}},\"$state\":{\"store\":\"appState\"}}]}"
},
"sort": [
"@timestamp",
"desc"
],
"title": "Audit Event Table [Auditbeat Kernel]",
"version": 1
},
"id": "0f10c430-c1c3-11e7-8995-936807a28b16",
"type": "search",
"version": 1
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"
},
"title": "Event Categories [Auditbeat Kernel]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"title\":\"Event Categories [Auditbeat Kernel]\",\"type\":\"pie\",\"params\":{\"type\":\"pie\",\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":true},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"audit.kernel.category\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Category\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"audit.kernel.action\",\"size\":20,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Action\"}}]}"
},
"id": "08679220-c25a-11e7-8692-232bd1143e8a",
"type": "visualization",
"version": 1
},
{
"attributes": {
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[],\"highlightAll\":true,\"version\":true}"
},
"optionsJSON": "{\"darkTheme\":false}",
"panelsJSON": "[{\"col\":1,\"id\":\"97680df0-c1c0-11e7-8995-936807a28b16\",\"panelIndex\":1,\"row\":1,\"size_x\":7,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"columns\":[\"beat.hostname\",\"audit.kernel.actor.primary\",\"audit.kernel.actor.secondary\",\"audit.kernel.action\",\"audit.kernel.thing.what\",\"audit.kernel.thing.primary\",\"audit.kernel.thing.secondary\",\"audit.kernel.how\",\"audit.kernel.result\"],\"id\":\"0f10c430-c1c3-11e7-8995-936807a28b16\",\"panelIndex\":3,\"row\":4,\"size_x\":12,\"size_y\":4,\"sort\":[\"@timestamp\",\"desc\"],\"type\":\"search\"},{\"size_x\":5,\"size_y\":3,\"panelIndex\":4,\"type\":\"visualization\",\"id\":\"08679220-c25a-11e7-8692-232bd1143e8a\",\"col\":8,\"row\":1}]",
"timeRestore": false,
"title": "[Auditbeat Kernel] Overview",
"uiStateJSON": "{}",
"version": 1
},
"id": "c0ac2c00-c1c0-11e7-8995-936807a28b16",
"type": "dashboard",
"version": 1
}
],
"version": "6.0.0"
}

View File

@ -0,0 +1,180 @@
{
"objects": [
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"meta\":{\"index\":\"auditbeat-*\",\"negate\":true,\"type\":\"phrase\",\"key\":\"audit.kernel.thing.secondary\",\"value\":\"0\",\"params\":{\"query\":\"0\",\"type\":\"phrase\"},\"disabled\":false,\"alias\":null,\"apply\":true},\"query\":{\"match\":{\"audit.kernel.thing.secondary\":{\"query\":\"0\",\"type\":\"phrase\"}}},\"$state\":{\"store\":\"appState\"}}],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"
},
"savedSearchId": "b4c93470-c240-11e7-8692-232bd1143e8a",
"title": "Bind (non-ephemeral) [Auditbeat Kernel]",
"uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}",
"version": 1,
"visState": "{\"title\":\"Bind (non-ephemeral) [Auditbeat Kernel]\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"audit.kernel.how\",\"size\":50,\"order\":\"desc\",\"orderBy\":\"_term\",\"customLabel\":\"Exe\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"audit.kernel.thing.primary\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"_term\",\"customLabel\":\"Address\"}},{\"id\":\"4\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"audit.kernel.thing.secondary\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"_term\",\"customLabel\":\"Port\"}}]}"
},
"id": "faf882f0-c242-11e7-8692-232bd1143e8a",
"type": "visualization",
"version": 1
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"
},
"savedSearchId": "5438b030-c246-11e7-8692-232bd1143e8a",
"title": "Connect [Auditbeat Kernel]",
"uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}",
"version": 1,
"visState": "{\"title\":\"Connect [Auditbeat Kernel]\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"audit.kernel.data.exe\",\"size\":50,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Exe\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"audit.kernel.thing.primary\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Address\"}},{\"id\":\"4\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"audit.kernel.thing.secondary\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Port\"}}]}"
},
"id": "ea483730-c246-11e7-8692-232bd1143e8a",
"type": "visualization",
"version": 1
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"
},
"savedSearchId": "e8734160-c24c-11e7-8692-232bd1143e8a",
"title": "Accept / Recvfrom Unique Address Table [Auditbeat Kernel]",
"uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}},\"spy\":{\"mode\":{\"name\":null,\"fill\":false}}}",
"version": 1,
"visState": "{\"title\":\"Accept / Recvfrom Unique Address Table [Auditbeat Kernel]\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"cardinality\",\"schema\":\"metric\",\"params\":{\"field\":\"audit.kernel.thing.primary\",\"customLabel\":\"Unique Addresses\"}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"audit.kernel.how\",\"size\":50,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Exe\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"audit.kernel.data.syscall\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Syscall\"}}]}"
},
"id": "ceb91de0-c250-11e7-8692-232bd1143e8a",
"type": "visualization",
"version": 1
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{}"
},
"title": "Socket Syscalls Time Series [Auditbeat Kernel]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"title\":\"Socket Syscalls Time Series [Auditbeat Kernel]\",\"type\":\"metrics\",\"params\":{\"id\":\"61ca57f0-469d-11e7-af02-69e470af7417\",\"type\":\"timeseries\",\"series\":[{\"id\":\"61ca57f1-469d-11e7-af02-69e470af7417\",\"color\":\"#68BC00\",\"split_mode\":\"terms\",\"metrics\":[{\"id\":\"61ca57f2-469d-11e7-af02-69e470af7417\",\"type\":\"count\"}],\"seperate_axis\":0,\"axis_position\":\"right\",\"formatter\":\"number\",\"chart_type\":\"line\",\"line_width\":1,\"point_size\":1,\"fill\":0.5,\"stacked\":\"none\",\"terms_field\":\"audit.kernel.data.syscall\",\"label\":\"syscall\"}],\"time_field\":\"@timestamp\",\"index_pattern\":\"*\",\"interval\":\"auto\",\"axis_position\":\"left\",\"axis_formatter\":\"number\",\"show_legend\":1,\"show_grid\":1,\"filter\":\"audit.kernel.thing.what:socket\",\"legend_position\":\"left\",\"bar_color_rules\":[{\"id\":\"2cebb0c0-c252-11e7-8a68-93ffe9ec5950\"}],\"gauge_color_rules\":[{\"id\":\"6c891740-c252-11e7-8a68-93ffe9ec5950\"}],\"gauge_width\":10,\"gauge_inner_width\":10,\"gauge_style\":\"half\",\"background_color_rules\":[{\"id\":\"95b603d0-c252-11e7-8a68-93ffe9ec5950\"}]},\"aggs\":[]}"
},
"id": "b21e0c70-c252-11e7-8692-232bd1143e8a",
"type": "visualization",
"version": 1
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"
},
"title": "Socket Families [Auditbeat Kernel]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"title\":\"Socket Families [Auditbeat Kernel]\",\"type\":\"pie\",\"params\":{\"type\":\"pie\",\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"left\",\"isDonut\":true},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"audit.kernel.socket.family\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Socket Family\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"audit.kernel.data.syscall\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Syscall\"}}]}"
},
"id": "a8e20450-c256-11e7-8692-232bd1143e8a",
"type": "visualization",
"version": 1
},
{
"attributes": {
"columns": [
"beat.hostname",
"audit.kernel.how",
"audit.kernel.thing.primary",
"audit.kernel.thing.secondary",
"audit.kernel.socket.family",
"audit.kernel.result"
],
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"highlightAll\":true,\"version\":true,\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[{\"$state\":{\"store\":\"appState\"},\"meta\":{\"alias\":null,\"disabled\":false,\"index\":\"auditbeat-*\",\"key\":\"metricset.name\",\"negate\":false,\"params\":{\"query\":\"kernel\",\"type\":\"phrase\"},\"type\":\"phrase\",\"value\":\"kernel\"},\"query\":{\"match\":{\"metricset.name\":{\"query\":\"kernel\",\"type\":\"phrase\"}}}},{\"$state\":{\"store\":\"appState\"},\"meta\":{\"alias\":null,\"disabled\":false,\"index\":\"auditbeat-*\",\"key\":\"audit.kernel.action\",\"negate\":false,\"params\":{\"query\":\"bound-socket\",\"type\":\"phrase\"},\"type\":\"phrase\",\"value\":\"bound-socket\"},\"query\":{\"match\":{\"audit.kernel.action\":{\"query\":\"bound-socket\",\"type\":\"phrase\"}}}},{\"meta\":{\"negate\":true,\"index\":\"auditbeat-*\",\"type\":\"phrase\",\"key\":\"audit.kernel.socket.family\",\"value\":\"netlink\",\"params\":{\"query\":\"netlink\",\"type\":\"phrase\"},\"disabled\":false,\"alias\":null},\"query\":{\"match\":{\"audit.kernel.socket.family\":{\"query\":\"netlink\",\"type\":\"phrase\"}}},\"$state\":{\"store\":\"appState\"}}]}"
},
"sort": [
"@timestamp",
"desc"
],
"title": "Socket Binds [Auditbeat Kernel]",
"version": 1
},
"id": "b4c93470-c240-11e7-8692-232bd1143e8a",
"type": "search",
"version": 1
},
{
"attributes": {
"columns": [
"beat.hostname",
"audit.kernel.how",
"audit.kernel.thing.primary",
"audit.kernel.thing.secondary",
"audit.kernel.socket.family",
"audit.kernel.result",
"audit.kernel.data.exit"
],
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"highlightAll\":true,\"version\":true,\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[{\"$state\":{\"store\":\"appState\"},\"meta\":{\"alias\":null,\"disabled\":false,\"index\":\"auditbeat-*\",\"key\":\"metricset.name\",\"negate\":false,\"params\":{\"query\":\"kernel\",\"type\":\"phrase\"},\"type\":\"phrase\",\"value\":\"kernel\"},\"query\":{\"match\":{\"metricset.name\":{\"query\":\"kernel\",\"type\":\"phrase\"}}}},{\"meta\":{\"negate\":false,\"index\":\"auditbeat-*\",\"type\":\"phrase\",\"key\":\"audit.kernel.action\",\"value\":\"connected-to\",\"params\":{\"query\":\"connected-to\",\"type\":\"phrase\"},\"disabled\":false,\"alias\":null},\"query\":{\"match\":{\"audit.kernel.action\":{\"query\":\"connected-to\",\"type\":\"phrase\"}}},\"$state\":{\"store\":\"appState\"}},{\"meta\":{\"index\":\"auditbeat-*\",\"negate\":false,\"disabled\":false,\"alias\":null,\"type\":\"exists\",\"key\":\"audit.kernel.thing.primary\",\"value\":\"exists\"},\"exists\":{\"field\":\"audit.kernel.thing.primary\"},\"$state\":{\"store\":\"appState\"}}]}"
},
"sort": [
"@timestamp",
"desc"
],
"title": "Socket Connects [Auditbeat Kernel]",
"version": 1
},
"id": "5438b030-c246-11e7-8692-232bd1143e8a",
"type": "search",
"version": 1
},
{
"attributes": {
"columns": [
"beat.hostname",
"audit.kernel.how",
"audit.kernel.thing.primary",
"audit.kernel.thing.secondary",
"audit.kernel.socket.family",
"audit.kernel.action"
],
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"highlightAll\":true,\"version\":true,\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[{\"$state\":{\"store\":\"appState\"},\"meta\":{\"alias\":null,\"disabled\":false,\"index\":\"auditbeat-*\",\"key\":\"metricset.name\",\"negate\":false,\"params\":{\"query\":\"kernel\",\"type\":\"phrase\"},\"type\":\"phrase\",\"value\":\"kernel\"},\"query\":{\"match\":{\"metricset.name\":{\"query\":\"kernel\",\"type\":\"phrase\"}}}},{\"meta\":{\"negate\":false,\"index\":\"auditbeat-*\",\"type\":\"phrase\",\"key\":\"audit.kernel.thing.what\",\"value\":\"socket\",\"params\":{\"query\":\"socket\",\"type\":\"phrase\"},\"disabled\":false,\"alias\":null},\"query\":{\"match\":{\"audit.kernel.thing.what\":{\"query\":\"socket\",\"type\":\"phrase\"}}},\"$state\":{\"store\":\"appState\"}},{\"meta\":{\"index\":\"auditbeat-*\",\"negate\":false,\"disabled\":false,\"alias\":null,\"type\":\"exists\",\"key\":\"audit.kernel.thing.primary\",\"value\":\"exists\"},\"exists\":{\"field\":\"audit.kernel.thing.primary\"},\"$state\":{\"store\":\"appState\"}},{\"query\":{\"terms\":{\"audit.kernel.action\":[\"received-from\",\"accepted-connection-from\"]}},\"meta\":{\"negate\":false,\"index\":\"auditbeat-*\",\"disabled\":false,\"alias\":\"action accepted or received from\",\"type\":\"custom\",\"key\":\"query\",\"value\":\"{\\\"terms\\\":{\\\"audit.kernel.action\\\":[\\\"received-from\\\",\\\"accepted-connection-from\\\"]}}\"},\"$state\":{\"store\":\"appState\"}}]}"
},
"sort": [
"@timestamp",
"desc"
],
"title": "Socket Accept / Recvfrom [Auditbeat Kernel]",
"version": 1
},
"id": "e8734160-c24c-11e7-8692-232bd1143e8a",
"type": "search",
"version": 1
},
{
"attributes": {
"description": "Summary of socket related syscall events.",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"language\":\"lucene\",\"query\":\"*\"},\"filter\":[],\"highlightAll\":true,\"version\":true}"
},
"optionsJSON": "{\"darkTheme\":false}",
"panelsJSON": "[{\"col\":7,\"id\":\"faf882f0-c242-11e7-8692-232bd1143e8a\",\"panelIndex\":1,\"row\":4,\"size_x\":6,\"size_y\":4,\"type\":\"visualization\"},{\"col\":1,\"id\":\"ea483730-c246-11e7-8692-232bd1143e8a\",\"panelIndex\":2,\"row\":8,\"size_x\":6,\"size_y\":5,\"type\":\"visualization\"},{\"col\":7,\"id\":\"ceb91de0-c250-11e7-8692-232bd1143e8a\",\"panelIndex\":3,\"row\":8,\"size_x\":6,\"size_y\":5,\"type\":\"visualization\"},{\"col\":1,\"id\":\"b21e0c70-c252-11e7-8692-232bd1143e8a\",\"panelIndex\":4,\"row\":1,\"size_x\":12,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"a8e20450-c256-11e7-8692-232bd1143e8a\",\"panelIndex\":5,\"row\":4,\"size_x\":6,\"size_y\":4,\"type\":\"visualization\"}]",
"timeRestore": false,
"title": "[Auditbeat Kernel] Sockets",
"uiStateJSON": "{\"P-1\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"P-2\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"P-3\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}}",
"version": 1
},
"id": "693a5f40-c243-11e7-8692-232bd1143e8a",
"type": "dashboard",
"version": 1
}
],
"version": "6.0.0"
}

View File

@ -0,0 +1,4 @@
// Package audit is an Auditbeat module that reports security-relevant
// information based on data captured from the operating system (OS) or services
// running on the OS.
package audit

View File

@ -0,0 +1,34 @@
{
"@timestamp": "2017-10-06T17:35:33.773Z",
"@metadata": {
"beat": "noindex",
"type": "doc",
"version": "1.2.3"
},
"audit": {
"file": {
"hashed": true,
"inode": "15329399",
"uid": 501,
"group": "staff",
"ctime": "2017-10-06T17:35:33.000Z",
"gid": 20,
"path": "/private/var/folders/8x/rnyk6yxn6w97lddn3bs02gf00000gn/T/audit-file387158249/file.data",
"mode": "0600",
"action": "created",
"mtime": "2017-10-06T17:35:33.000Z",
"size": 11,
"owner": "akroh",
"sha1": "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed"
}
},
"metricset": {
"module": "audit",
"name": "file",
"rtt": 5928
},
"beat": {
"name": "host.example.com",
"hostname": "host.example.com"
}
}

View File

@ -0,0 +1,87 @@
=== Audit file metricset
The `file` metricset sends events when a file is changed (created, updated, or
deleted) on disk. The events contain file metadata and hashes.
The metricset is implemented for Linux, macOS (Darwin), and Windows.
[float]
=== How it works
This metricset uses features of the operating system to monitor file changes in
realtime. When the metricset starts it creates a subscription with the OS to
receive notifications of changes to the specified files or directories. Upon
receiving notification of a change the metricset will read the file's metadata
and then compute a hash of the file's contents.
At startup this metricset will perform an initial scan of the configured files
and directories to generate baseline data for the monitored paths and detect
changes since the last time it was run. It uses locally persisted data in order
to only send events for new or modified files.
The operating system features that power this feature are as follows.
* Linux - `inotify` is used, and therefore the kernel must have inotify support.
Inotify was initially merged into the 2.6.13 Linux kernel.
* macOS (Darwin) - `kqueue` is used. It requires one file descriptor for each
file so please check the `ulimit` values used with {beatname_uc}. The FSEvents
API was considered for the implementation, but FSEvents coalesces multiple
notifications into a single event which is inconsistent with the metricset's
behavior on other operating systems.
* Windows - `ReadDirectoryChangesW` is used.
The file metricset should not be used to monitor paths on network file systems.
[float]
=== Configuration options
This metricset has some configuration options for tuning its behavior. The
following example shows all configuration options with their default values for
Linux.
[source,yaml]
----
- module: audit
metricsets: [file]
file.paths:
- /bin
- /usr/bin
- /sbin
- /usr/sbin
- /etc
file.scan_at_start: true
file.scan_rate_per_sec: 50 MiB
file.max_file_size: 100 MiB
file.hash_types: [sha1]
----
*`file.paths`*:: A list of paths (directories or files) to watch. The watches
are non-recursive and globs are not supported. The specified paths should exist
when the metricset is started.
*`file.scan_at_start`*:: A boolean value that controls if {beatname_uc} scans
over the configured file paths at startup and send events for the files
that have been modified since the last time {beatname_uc} was running. The
default value is true.
+
This feature depends on data stored locally in `path.data` in order to determine
if a file has changed. The first time {beatname_uc} runs it will send an event
for each file it encounters.
*`file.scan_rate_per_sec`*:: When `file.scan_at_start` is enabled this sets an
average read rate defined in bytes per second for the initial scan. This
throttles the amount of CPU and I/O that {beatname_uc} consumes at startup.
The default value is "50 MiB". Setting the value to "0" disables throttling.
For convenience units can be specified as a suffix to the value. The supported
units are `b` (default), `kib`, `kb`, `mib`, `mb`, `gib`, `gb`, `tib`, `tb`,
`pib`, `pb`, `eib`, and `eb`.
*`file.max_file_size`*:: The maximum size of a file in bytes for which
{beatname_uc} will compute hashes. Files larger than this size will not be
hashed. The default value is 100 MiB. For convenience units can be specified as
a suffix to the value. The supported units are `b` (default), `kib`, `kb`, `mib`,
`mb`, `gib`, `gb`, `tib`, `tb`, `pib`, `pb`, `eib`, and `eb`.
*`file.hash_types`*:: A list of hash types to compute when the file changes.
The supported hash types are md5, sha1, sha224, sha256, sha384, sha512,
sha512_224, sha512_256, sha3_224, sha3_256, sha3_384 and sha3_512. The default value is sha1.

View File

@ -0,0 +1,124 @@
- name: file
type: group
description: >
The file metricset generates events when a file changes on disk.
fields:
- name: path
type: text
description: The path to the file.
multi_fields:
- name: raw
type: keyword
description: >
The path to the file. This is a non-analyzed field that is useful
for aggregations.
- name: target_path
type: keyword
description: The target path for symlinks.
- name: action
type: keyword
example: attributes_modified
description: >
Action describes the change that triggered the event. The possible
values are: attributes_modified, created, deleted, updated, moved, and
config_change.
- name: type
type: keyword
description: The file type (file, dir, or symlink).
- name: inode
type: keyword
description: The inode representing the file in the filesystem.
- name: uid
type: keyword
description: The user ID (UID) of the file owner.
- name: owner
type: keyword
description: The file owner's username.
- name: gid
type: keyword
description: The primary group ID (GID) of the file.
- name: group
type: keyword
description: The primary group name of the file.
- name: sid
type: keyword
description: The security identifier (SID) of the file owner (Windows only).
- name: mode
type: keyword
example: 0640
description: The mode of the file in octal representation.
- name: size
type: long
description: The file size in bytes (field is only added when `type` is `file`).
- name: mtime
type: date
description: The last modified time of the file (time when content was modified).
- name: ctime
type: date
description: The last change time of the file (time when metadata was changed).
- name: hashed
type: boolean
description: >
Boolean indicating if the event includes any file hashes.
- name: md5
type: keyword
description: MD5 hash of the file.
- name: sha1
type: keyword
description: SHA1 hash of the file.
- name: sha224
type: keyword
description: SHA224 hash of the file.
- name: sha256
type: keyword
description: SHA256 hash of the file.
- name: sha384
type: keyword
description: SHA384 hash of the file.
- name: sha3_224
type: keyword
description: SHA3_224 hash of the file.
- name: sha3_256
type: keyword
description: SHA3_256 hash of the file.
- name: sha3_384
type: keyword
description: SHA3_384 hash of the file.
- name: sha3_512
type: keyword
description: SHA3_512 hash of the file.
- name: sha512
type: keyword
description: SHA512 hash of the file.
- name: sha512_224
type: keyword
description: SHA512/224 hash of the file.
- name: sha512_256
type: keyword
description: SHA512/256 hash of the file.

View File

@ -0,0 +1,120 @@
package file
import (
"path/filepath"
"sort"
"strings"
"github.com/dustin/go-humanize"
"github.com/joeshaw/multierror"
"github.com/pkg/errors"
)
// HashType identifies a cryptographic algorithm.
type HashType string

// Unpack decodes a raw configuration string into a HashType. It performs no
// validation here; Validate checks the value against validHashes later.
func (t *HashType) Unpack(s string) error {
	*t = HashType(s)
	return nil
}
// validHashes lists every hash type accepted in the file.hash_types
// configuration setting. Validate compares (lowercased) config values
// against this list.
var validHashes = []HashType{MD5, SHA1, SHA224, SHA256, SHA384, SHA3_224, SHA3_256, SHA3_384, SHA3_512, SHA512, SHA512_224, SHA512_256}

// Enum of hash types.
const (
	MD5        HashType = "md5"
	SHA1       HashType = "sha1"
	SHA224     HashType = "sha224"
	SHA256     HashType = "sha256"
	SHA384     HashType = "sha384"
	SHA3_224   HashType = "sha3_224"
	SHA3_256   HashType = "sha3_256"
	SHA3_384   HashType = "sha3_384"
	SHA3_512   HashType = "sha3_512"
	SHA512     HashType = "sha512"
	SHA512_224 HashType = "sha512_224"
	SHA512_256 HashType = "sha512_256"
)
// Config contains the configuration parameters for the file integrity
// metricset.
type Config struct {
	// Paths is the list of files or directories to watch. Watches are
	// non-recursive unless Recursive is set. Validate resolves symlinks,
	// sorts, and deduplicates this list in place.
	Paths []string `config:"file.paths" validate:"required"`
	// HashTypes lists the hash algorithms to compute when a file changes.
	// Valid values are enumerated in validHashes.
	HashTypes []HashType `config:"file.hash_types"`
	// MaxFileSize is the human-readable size limit (e.g. "100 MiB") above
	// which files are not hashed.
	MaxFileSize string `config:"file.max_file_size"`
	// MaxFileSizeBytes is MaxFileSize parsed into bytes by Validate.
	MaxFileSizeBytes uint64 `config:",ignore"`
	// ScanAtStart controls whether an initial scan over Paths is performed
	// at startup.
	ScanAtStart bool `config:"file.scan_at_start"`
	// ScanRatePerSec is the human-readable average read rate (e.g.
	// "50 MiB") used to throttle the initial scan.
	ScanRatePerSec string `config:"file.scan_rate_per_sec"`
	// ScanRateBytesPerSec is ScanRatePerSec parsed into bytes by Validate.
	ScanRateBytesPerSec uint64 `config:",ignore"`

	// Recursive enables recursive monitoring of directories.
	// XXX: This feature is only implemented in the scanner. It needs to be
	// implemented in the fsnotify code. Don't use it yet.
	Recursive bool `config:"file.recursive"`
}
// Validate validates the config data and return an error explaining all the
// problems with the config. This method modifies the given config: symlinks
// in file.paths are resolved, the path list is sorted and deduplicated, hash
// type names are normalized to lower case, and the human-readable size and
// rate strings are parsed into their *Bytes counterparts.
func (c *Config) Validate() error {
	// Resolve symlinks. Paths that fail to resolve are left unchanged.
	for i, p := range c.Paths {
		if evalPath, err := filepath.EvalSymlinks(p); err == nil {
			c.Paths[i] = evalPath
		}
	}

	// Sort and deduplicate.
	sort.Strings(c.Paths)
	c.Paths = deduplicate(c.Paths)

	var errs multierror.Errors
	var err error

nextHash:
	for i, ht := range c.HashTypes {
		// Normalize in place so that consumers of c.HashTypes see the
		// canonical lower-case name. Previously the lower-cased value was
		// used only for validation, so a value like "MD5" would pass
		// validation here but later be rejected as an unknown hash type.
		ht = HashType(strings.ToLower(string(ht)))
		c.HashTypes[i] = ht
		for _, validHash := range validHashes {
			if ht == validHash {
				continue nextHash
			}
		}
		errs = append(errs, errors.Errorf("invalid file.hash_types value '%v'", ht))
	}

	c.MaxFileSizeBytes, err = humanize.ParseBytes(c.MaxFileSize)
	if err != nil {
		errs = append(errs, errors.Wrap(err, "invalid file.max_file_size value"))
	} else if c.MaxFileSizeBytes == 0 {
		// MaxFileSizeBytes is unsigned, so the previous "<= 0" check could
		// only ever mean "== 0"; make that explicit.
		errs = append(errs, errors.Errorf("file.max_file_size value (%v) must be positive", c.MaxFileSize))
	}

	c.ScanRateBytesPerSec, err = humanize.ParseBytes(c.ScanRatePerSec)
	if err != nil {
		errs = append(errs, errors.Wrap(err, "invalid file.scan_rate_per_sec value"))
	}

	return errs.Err()
}
// deduplicate deduplicates the given sorted string slice. The returned slice
// reuses the same backing array as in (so don't use in after calling this).
// Note: because the comparison seed is the empty string, a leading run of
// empty strings is dropped as well.
func deduplicate(in []string) []string {
	out := in[:0]
	last := ""
	for _, s := range in {
		if s != last {
			out = append(out, s)
			last = s
		}
	}
	return out
}
// defaultConfig holds the default metricset configuration: SHA1 hashing, a
// 100 MiB per-file hashing limit, an initial scan at startup, and a
// 50 MiB/sec scan rate. MaxFileSizeBytes is pre-populated to match
// MaxFileSize; ScanRateBytesPerSec is left for Validate to fill in.
var defaultConfig = Config{
	HashTypes:        []HashType{SHA1},
	MaxFileSize:      "100 MiB",
	MaxFileSizeBytes: 100 * 1024 * 1024,
	ScanAtStart:      true,
	ScanRatePerSec:   "50 MiB",
}

View File

@ -0,0 +1,121 @@
package file
import (
"os"
"path/filepath"
"testing"
"github.com/joeshaw/multierror"
"github.com/stretchr/testify/assert"
"github.com/elastic/beats/libbeat/common"
"github.com/elastic/go-ucfg"
)
// TestConfig verifies that a valid configuration unpacks and that Validate
// parses the human-readable size/rate strings into byte counts.
func TestConfig(t *testing.T) {
	config, err := common.NewConfigFrom(map[string]interface{}{
		"file.paths":             []string{"/usr/bin"},
		"file.hash_types":        []string{"md5", "sha256"},
		"file.max_file_size":     "1 GiB",
		"file.scan_rate_per_sec": "10MiB",
	})
	if err != nil {
		t.Fatal(err)
	}

	c := defaultConfig
	if err := config.Unpack(&c); err != nil {
		t.Fatal(err)
	}

	assert.Equal(t, []HashType{MD5, SHA256}, c.HashTypes)
	assert.EqualValues(t, 1024*1024*1024, c.MaxFileSizeBytes)
	assert.EqualValues(t, 1024*1024*10, c.ScanRateBytesPerSec)
}
// TestConfigInvalid verifies that all validation problems are reported
// together: two bad hash types plus unparsable size and rate values should
// yield four errors.
func TestConfigInvalid(t *testing.T) {
	config, err := common.NewConfigFrom(map[string]interface{}{
		"file.paths":             []string{"/usr/bin"},
		"file.hash_types":        []string{"crc32", "sha256", "hmac"},
		"file.max_file_size":     "32 Hz",
		"file.scan_rate_per_sec": "32mb/sec",
	})
	if err != nil {
		t.Fatal(err)
	}

	c := defaultConfig
	if err := config.Unpack(&c); err != nil {
		t.Log(err)
		// Unwrap the ucfg error to reach the multierror produced by Validate.
		ucfgErr, ok := err.(ucfg.Error)
		if !ok {
			t.Fatal("expected ucfg.Error")
		}
		merr, ok := ucfgErr.Reason().(*multierror.MultiError)
		if !ok {
			t.Fatal("expected MultiError")
		}
		assert.Len(t, merr.Errors, 4)
		return
	}
	t.Fatal("expected error")
}
// TestConfigInvalidMaxFileSize verifies that a zero max_file_size is
// rejected by Validate.
func TestConfigInvalidMaxFileSize(t *testing.T) {
	config, err := common.NewConfigFrom(map[string]interface{}{
		"file.paths":         []string{"/usr/bin"},
		"file.max_file_size": "0", // Value must be > 0.
	})
	if err != nil {
		t.Fatal(err)
	}

	c := defaultConfig
	if err := config.Unpack(&c); err != nil {
		t.Log(err)
		return
	}
	t.Fatal("expected error")
}
// TestConfigEvalSymlinks verifies that Validate resolves symlinked paths to
// their targets (setupTestDir creates link_to_subdir -> subdir).
func TestConfigEvalSymlinks(t *testing.T) {
	dir := setupTestDir(t)
	defer os.RemoveAll(dir)

	config, err := common.NewConfigFrom(map[string]interface{}{
		"file.paths": []string{filepath.Join(dir, "link_to_subdir")},
	})
	if err != nil {
		t.Fatal(err)
	}

	c := defaultConfig
	if err := config.Unpack(&c); err != nil {
		t.Log(err)
		return
	}

	// link_to_subdir was resolved to subdir.
	assert.Equal(t, filepath.Base(c.Paths[0]), "subdir")
}
// TestConfigRemoveDuplicates verifies that Validate deduplicates identical
// paths.
func TestConfigRemoveDuplicates(t *testing.T) {
	config, err := common.NewConfigFrom(map[string]interface{}{
		"file.paths": []string{"/path/a", "/path/a"},
	})
	if err != nil {
		t.Fatal(err)
	}

	c := defaultConfig
	if err := config.Unpack(&c); err != nil {
		t.Log(err)
		return
	}

	assert.Len(t, c.Paths, 1)
}

View File

@ -0,0 +1,396 @@
package file
import (
"bytes"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/hex"
"encoding/json"
"fmt"
"hash"
"io"
"os"
"path/filepath"
"runtime"
"strconv"
"time"
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
"github.com/elastic/beats/libbeat/common"
"github.com/elastic/beats/metricbeat/mb"
)
// Action is a description of the change that occurred.
type Action uint8

// String returns the name of the action, or "unknown" for a value that has
// no entry in actionNames.
func (a Action) String() string {
	if name, found := actionNames[a]; found {
		return name
	}
	return "unknown"
}

// List of possible Actions.
//
// NOTE(review): "iota << 1" produces the sequence 0, 2, 4, 6, 8, 10, 12.
// The values are distinct (all uses in this file are exact comparisons and
// map lookups), but they are NOT single-bit flags -- e.g. Deleted (6) equals
// AttributesModified|Created. If these were ever intended to be combinable
// as a bit set, the expression should be "1 << iota" -- confirm intent
// before changing the persisted values.
const (
	None = iota << 1
	AttributesModified
	Created
	Deleted
	Updated
	Moved
	ConfigChange
)

// actionNames maps each Action to its event-output name.
var actionNames = map[Action]string{
	None:               "none",
	AttributesModified: "attributes_modified",
	Created:            "created",
	Deleted:            "deleted",
	Updated:            "updated",
	Moved:              "moved",
	ConfigChange:       "config_change",
}
// Source identifies the source of an event (i.e. what triggered it).
type Source uint8

// String returns the name of the source, or "unknown" for a value that has
// no entry in sourceNames.
func (s Source) String() string {
	if name, found := sourceNames[s]; found {
		return name
	}
	return "unknown"
}

const (
	// SourceScan identifies events triggerd by a file system scan.
	SourceScan Source = iota
	// SourceFSNotify identifies events triggered by a notification from the
	// file system.
	SourceFSNotify
)

// sourceNames maps each Source to its event-output name.
var sourceNames = map[Source]string{
	SourceScan:     "scan",
	SourceFSNotify: "fsnotify",
}
// Type identifies the file type (e.g. dir, file, symlink).
type Type uint8

// String returns the name of the type, or "unknown" for a value that has no
// entry in typeNames (including UnknownType, which is deliberately absent).
func (t Type) String() string {
	if name, found := typeNames[t]; found {
		return name
	}
	return "unknown"
}

// Enum of possible file.Types.
const (
	UnknownType Type = iota // Typically seen in deleted notifications where the object is gone.
	FileType
	DirType
	SymlinkType
)

// typeNames maps each known Type to its event-output name.
var typeNames = map[Type]string{
	FileType:    "file",
	DirType:     "dir",
	SymlinkType: "symlink",
}
// Event describe the filesystem change and includes metadata about the file.
type Event struct {
	Timestamp  time.Time           // Time of event.
	Path       string              // The path associated with the event.
	TargetPath string              // Target path for symlinks.
	Info       *Metadata           // File metadata (if the file exists).
	Source     Source              // Source of the event.
	Action     Action              // Action (like created, updated).
	Hashes     map[HashType][]byte // File hashes.

	// Metadata
	rtt    time.Duration // Time taken to collect the info.
	errors []error       // Errors that occurred while collecting the info.
}

// Metadata contains file metadata.
type Metadata struct {
	Inode uint64      // Inode number (on Windows, derived from the file index).
	UID   uint32      // Owner user ID (POSIX only).
	GID   uint32      // Owner group ID (POSIX only).
	SID   string      // Owner security identifier (Windows only).
	Owner string      // Owner name.
	Group string      // Group name (POSIX only).
	Size  uint64      // File size in bytes.
	MTime time.Time   // Last modification time.
	CTime time.Time   // Last metadata change time.
	Type  Type        // File type (dir, file, symlink).
	Mode  os.FileMode // Permissions
}
// NewEventFromFileInfo creates a new Event based on data from a os.FileInfo
// object that has already been created. Any errors that occur are included in
// the returned Event. Files larger than maxFileSize are not hashed; symlinks
// have their target resolved into TargetPath instead of being hashed.
func NewEventFromFileInfo(
	path string,
	info os.FileInfo,
	err error,
	action Action,
	source Source,
	maxFileSize uint64,
	hashTypes []HashType,
) Event {
	event := Event{
		Timestamp: time.Now().UTC(),
		Path:      path,
		Action:    action,
		Source:    source,
	}

	// err indicates that info is invalid.
	if err != nil {
		event.errors = append(event.errors, err)
		return event
	}

	// Deleted events will not have file info.
	if info == nil {
		return event
	}

	event.Info, err = NewMetadata(path, info)
	if err != nil {
		// Metadata may still be partially populated; keep it and record the error.
		event.errors = append(event.errors, err)
	}
	if event.Info == nil {
		// This should never happen (only a change in Go could cause it).
		return event
	}

	switch event.Info.Type {
	case FileType:
		if event.Info.Size <= maxFileSize {
			hashes, err := hashFile(event.Path, hashTypes...)
			if err != nil {
				event.errors = append(event.errors, err)
			} else {
				event.Hashes = hashes
			}
		}
	case SymlinkType:
		// Resolution failure leaves TargetPath empty; the error is ignored.
		event.TargetPath, _ = filepath.EvalSymlinks(event.Path)
	}

	return event
}
// NewEvent creates a new Event. Any errors that occur are included in the
// returned Event.
func NewEvent(
	path string,
	action Action,
	source Source,
	maxFileSize uint64,
	hashTypes []HashType,
) Event {
	// A deleted file cannot be stat'd, so skip straight to event creation.
	if action == Deleted {
		return NewEventFromFileInfo(path, nil, nil, action, source, maxFileSize, hashTypes)
	}

	info, lstatErr := os.Lstat(path)
	// errors.Wrap returns nil when lstatErr is nil, so a successful Lstat
	// passes a nil error through.
	wrappedErr := errors.Wrap(lstatErr, "failed to lstat")
	return NewEventFromFileInfo(path, info, wrappedErr, action, source, maxFileSize, hashTypes)
}
// String returns the event rendered as JSON. Marshaling errors are ignored
// and yield an empty string.
func (e *Event) String() string {
	out, _ := json.Marshal(e)
	return string(out)
}
// buildMapStr converts an Event into the common.MapStr form used for
// reporting. Only fields that are present on the event are included.
func buildMapStr(e *Event) common.MapStr {
	m := common.MapStr{
		"@timestamp": e.Timestamp,
		"path":       e.Path,
		"hashed":     len(e.Hashes) > 0,
		mb.RTTKey:    e.rtt,
	}

	if e.Action > 0 {
		m["action"] = e.Action.String()
	}

	if e.TargetPath != "" {
		m["target_path"] = e.TargetPath
	}

	if e.Info != nil {
		info := e.Info
		// Inode is rendered as a decimal string, presumably to avoid
		// precision loss in JSON consumers -- confirm before changing.
		m["inode"] = strconv.FormatUint(info.Inode, 10)
		m["mtime"] = info.MTime
		m["ctime"] = info.CTime

		if e.Info.Type == FileType {
			m["size"] = info.Size
		}

		if info.Type != UnknownType {
			m["type"] = info.Type.String()
		}

		// SID is Windows-specific; uid/gid/mode are POSIX-specific.
		if runtime.GOOS == "windows" {
			if info.SID != "" {
				m["sid"] = info.SID
			}
		} else {
			m["uid"] = info.UID
			m["gid"] = info.GID
			m["mode"] = fmt.Sprintf("%#04o", uint32(info.Mode))
		}

		if info.Owner != "" {
			m["owner"] = info.Owner
		}
		if info.Group != "" {
			m["group"] = info.Group
		}
	}

	// Each hash is emitted hex-encoded under its algorithm name.
	for hashType, hash := range e.Hashes {
		m[string(hashType)] = hex.EncodeToString(hash)
	}

	return m
}
// diffEvents returns true if the file info differs between the old event and
// the new event. Changes to the timestamp and action are ignored. If old
// contains a superset of new's hashes then false is returned.
//
// The returned Action reflects precedence: a content change (Updated) wins
// over an attribute change, which wins over a hash-type configuration change.
func diffEvents(old, new *Event) (Action, bool) {
	// Also covers old == nil && new == nil.
	if old == new {
		return 0, false
	}

	if old == nil && new != nil {
		return Created, true
	}

	if old != nil && new == nil {
		return Deleted, true
	}

	if old.Path != new.Path {
		return Moved, true
	}

	// Test if new.Hashes is a subset of old.Hashes.
	hasAllHashes := true
	for hashType, newValue := range new.Hashes {
		oldValue, found := old.Hashes[hashType]
		if !found {
			hasAllHashes = false
			continue
		}

		// The Updated action takes precedence over a new hash type being configured.
		if !bytes.Equal(oldValue, newValue) {
			return Updated, true
		}
	}

	if old.TargetPath != new.TargetPath ||
		(old.Info == nil && new.Info != nil) ||
		(old.Info != nil && new.Info == nil) {
		return AttributesModified, true
	}

	// Test if metadata has changed.
	if o, n := old.Info, new.Info; o != nil && n != nil {
		// The owner and group names are ignored (they aren't persisted).
		if o.Inode != n.Inode || o.UID != n.UID || o.GID != n.GID || o.SID != n.SID ||
			o.Mode != n.Mode || o.Type != n.Type {
			return AttributesModified, true
		}

		// For files consider mtime and size.
		if n.Type == FileType && (!o.MTime.Equal(n.MTime) || o.Size != n.Size) {
			return AttributesModified, true
		}
	}

	// The old event didn't have all the requested hash types.
	if !hasAllHashes {
		return ConfigChange, true
	}

	// No change.
	return None, false
}
// hashFile computes the requested hash types over the named file in a single
// pass. It returns a map from hash type to the raw digest bytes, or
// (nil, nil) when no hash types are requested. An unknown hash type or any
// file access failure yields an error and no result.
func hashFile(name string, hashType ...HashType) (map[HashType][]byte, error) {
	if len(hashType) == 0 {
		return nil, nil
	}

	// Constructors for every supported algorithm.
	constructors := map[HashType]func() hash.Hash{
		MD5:        md5.New,
		SHA1:       sha1.New,
		SHA224:     sha256.New224,
		SHA256:     sha256.New,
		SHA384:     sha512.New384,
		SHA3_224:   sha3.New224,
		SHA3_256:   sha3.New256,
		SHA3_384:   sha3.New384,
		SHA3_512:   sha3.New512,
		SHA512:     sha512.New,
		SHA512_224: sha512.New512_224,
		SHA512_256: sha512.New512_256,
	}

	// Validate the requested types and build the hashers before touching
	// the file, so an unknown type fails without any I/O.
	hashes := make([]hash.Hash, 0, len(hashType))
	for _, typ := range hashType {
		newHasher, found := constructors[typ]
		if !found {
			return nil, errors.Errorf("unknown hash type '%v'", typ)
		}
		hashes = append(hashes, newHasher())
	}

	f, err := os.Open(name)
	if err != nil {
		return nil, errors.Wrap(err, "failed to open file for hashing")
	}
	defer f.Close()

	// One read pass feeds every hasher.
	if _, err := io.Copy(multiWriter(hashes), f); err != nil {
		return nil, errors.Wrap(err, "failed to calculate file hashes")
	}

	nameToHash := make(map[HashType][]byte, len(hashes))
	for i, h := range hashes {
		nameToHash[hashType[i]] = h.Sum(nil)
	}

	return nameToHash, nil
}
// multiWriter wraps a slice of hashes in a single io.Writer that duplicates
// every write to each hash.
func multiWriter(hash []hash.Hash) io.Writer {
	writers := make([]io.Writer, len(hash))
	for i, h := range hash {
		writers[i] = h
	}
	return io.MultiWriter(writers...)
}

View File

@ -0,0 +1,238 @@
package file
import (
"encoding/hex"
"fmt"
"io/ioutil"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// testEventTime is the fixed timestamp shared by all fixture events.
var testEventTime = time.Now().UTC()

// testEvent returns a fresh fixture Event; each call allocates new Metadata
// and Hashes so tests can mutate their copy freely.
func testEvent() *Event {
	return &Event{
		Timestamp: testEventTime,
		Path:      "/home/user",
		Source:    SourceScan,
		Action:    ConfigChange,
		Info: &Metadata{
			Type:  FileType,
			Inode: 123,
			UID:   500,
			GID:   500,
			Mode:  0600,
			CTime: testEventTime,
			MTime: testEventTime,
		},
		Hashes: map[HashType][]byte{
			SHA1:   mustDecodeHex("abcd"),
			SHA256: mustDecodeHex("1234"),
		},
	}
}
// TestDiffEvents exercises each branch of diffEvents: nil handling, identity,
// create/delete/move, metadata changes, hash sub/supersets, and the
// precedence of Updated over ConfigChange.
func TestDiffEvents(t *testing.T) {
	t.Run("nil values", func(t *testing.T) {
		_, changed := diffEvents(nil, nil)
		assert.False(t, changed)
	})

	t.Run("no change", func(t *testing.T) {
		e := testEvent()
		_, changed := diffEvents(e, e)
		assert.False(t, changed)
	})

	t.Run("new file", func(t *testing.T) {
		action, changed := diffEvents(nil, testEvent())
		assert.True(t, changed)
		assert.EqualValues(t, Created, action)
	})

	t.Run("deleted", func(t *testing.T) {
		action, changed := diffEvents(testEvent(), nil)
		assert.True(t, changed)
		assert.EqualValues(t, Deleted, action)
	})

	t.Run("moved", func(t *testing.T) {
		e := testEvent()
		e.Path += "_new"
		action, changed := diffEvents(testEvent(), e)
		assert.True(t, changed)
		assert.EqualValues(t, Moved, action)
	})

	t.Run("updated metadata", func(t *testing.T) {
		e := testEvent()
		e.Info.Mode = 0644
		action, changed := diffEvents(testEvent(), e)
		assert.True(t, changed)
		assert.EqualValues(t, AttributesModified, action, "action: %v", action)
	})

	t.Run("missing metadata", func(t *testing.T) {
		e := testEvent()
		e.Info = nil
		action, changed := diffEvents(testEvent(), e)
		assert.True(t, changed)
		assert.EqualValues(t, AttributesModified, action)
	})

	// A newly configured hash type reports ConfigChange.
	t.Run("more hashes", func(t *testing.T) {
		e := testEvent()
		e.Hashes["md5"] = mustDecodeHex("5678")
		action, changed := diffEvents(testEvent(), e)
		assert.True(t, changed)
		assert.EqualValues(t, ConfigChange, action)
	})

	// Old having extra hashes is not a change.
	t.Run("subset of hashes", func(t *testing.T) {
		e := testEvent()
		delete(e.Hashes, "sha256")
		action, changed := diffEvents(testEvent(), e)
		assert.False(t, changed)
		assert.Zero(t, action)
	})

	t.Run("different hash values", func(t *testing.T) {
		e := testEvent()
		e.Hashes = map[HashType][]byte{
			SHA1:   mustDecodeHex("ef"),
			SHA256: mustDecodeHex("1234"),
		}
		action, changed := diffEvents(testEvent(), e)
		assert.True(t, changed)
		assert.EqualValues(t, Updated, action)
	})

	// A content change wins over the accompanying metadata change.
	t.Run("updated hashes and metadata", func(t *testing.T) {
		e := testEvent()
		e.Hashes = map[HashType][]byte{
			SHA1:   mustDecodeHex("ef"),
			SHA256: mustDecodeHex("1234"),
		}
		e.Info.MTime = time.Now()
		action, changed := diffEvents(testEvent(), e)
		assert.True(t, changed)
		assert.EqualValues(t, Updated, action)
	})
}
// TestHashFile checks hashFile against externally computed digests of
// "hello world!\n" for every supported algorithm, and covers the no-hash,
// unknown-hash, and missing-file cases.
func TestHashFile(t *testing.T) {
	t.Run("valid hashes", func(t *testing.T) {
		// Computed externally.
		expectedHashes := map[HashType][]byte{
			MD5:        mustDecodeHex("c897d1410af8f2c74fba11b1db511e9e"),
			SHA1:       mustDecodeHex("f951b101989b2c3b7471710b4e78fc4dbdfa0ca6"),
			SHA224:     mustDecodeHex("d301812e62eec9b1e68c0b861e62f374e0d77e8365f5ddd6cccc8693"),
			SHA256:     mustDecodeHex("ecf701f727d9e2d77c4aa49ac6fbbcc997278aca010bddeeb961c10cf54d435a"),
			SHA384:     mustDecodeHex("ec8d147738b2e4bf6f5c5ac50a9a7593fb1ee2de01474d6f8a6c7fdb7ac945580772a5225a4c7251a7c0697acb7b8405"),
			SHA512:     mustDecodeHex("f5408390735bf3ef0bb8aaf66eff4f8ca716093d2fec50996b479b3527e5112e3ea3b403e9e62c72155ac1e08a49b476f43ab621e1a5fc2bbb0559d8258a614d"),
			SHA512_224: mustDecodeHex("fde054253f43a95559f1b6eeb8e2edba4124957b43b85d7fcb4d20d5"),
			SHA512_256: mustDecodeHex("3380f6a625aac19cbdddc598ab07aea195bae000f8d4c8cd6bb8870ac25df15d"),
			SHA3_224:   mustDecodeHex("62e3515dae95bbd0e105bee840b7dc3b47f6d6bc772c259dbc0da31a"),
			SHA3_256:   mustDecodeHex("3cb5385a2987ca45888d7877fbcf92b4854f7155ae19c96cecc7ea1300c6f5a4"),
			SHA3_384:   mustDecodeHex("f19539818b4f29fa0ee599db4113fd81b77cd1119682e6d799a052849d2e40ef0dad84bc947ba2dee742d9731f1b9e9b"),
			SHA3_512:   mustDecodeHex("f0a2c0f9090c1fd6dedf211192e36a6668d2b3c7f57a35419acb1c4fc7dfffc267bbcd90f5f38676caddcab652f6aacd1ed4e0ad0a8e1e4b98f890b62b6c7c5c"),
		}

		f, err := ioutil.TempFile("", "input.txt")
		if err != nil {
			t.Fatal(err)
		}
		defer os.Remove(f.Name())

		f.WriteString("hello world!\n")
		f.Sync()
		f.Close()

		hashes, err := hashFile(f.Name(), validHashes...)
		if err != nil {
			t.Fatal(err)
		}

		// Every requested hash must be present and correct; hashes is
		// drained so any extras are detected by the final length check.
		for _, hashType := range validHashes {
			if hash, found := hashes[hashType]; !found {
				t.Errorf("%v not found", hashType)
			} else {
				delete(hashes, hashType)
				expected, ok := expectedHashes[hashType]
				if !ok {
					t.Fatalf("hash type not found in expected hashes: %v", hashType)
				}
				assert.Equal(t, expected, hash, "%v hash incorrect", hashType)
			}
		}

		assert.Len(t, hashes, 0)
	})

	t.Run("no hashes", func(t *testing.T) {
		hashes, err := hashFile("anyfile.txt")
		assert.Nil(t, hashes)
		assert.NoError(t, err)
	})

	t.Run("invalid hash", func(t *testing.T) {
		hashes, err := hashFile("anyfile.txt", "md4")
		assert.Nil(t, hashes)
		assert.Error(t, err)
	})

	t.Run("invalid file", func(t *testing.T) {
		hashes, err := hashFile("anyfile.txt", "md5")
		assert.Nil(t, hashes)
		assert.Error(t, err)
	})
}
// BenchmarkHashFile measures each supported hash algorithm over a 100 MiB
// zero-filled temp file.
func BenchmarkHashFile(b *testing.B) {
	f, err := ioutil.TempFile("", "hash")
	if err != nil {
		b.Fatal(err)
	}
	defer os.Remove(f.Name())

	// 100 bytes x 1024*1024 writes = 100 MiB.
	zeros := make([]byte, 100)
	iterations := 1024 * 1024 // 100 MiB
	for i := 0; i < iterations; i++ {
		if _, err = f.Write(zeros); err != nil {
			b.Fatal(err)
		}
	}
	b.Logf("file size: %v bytes", len(zeros)*iterations)
	f.Sync()
	f.Close()
	b.ResetTimer()

	for _, hashType := range validHashes {
		b.Run(string(hashType), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				_, err = hashFile(f.Name(), hashType)
				if err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}
// mustDecodeHex decodes a hex string and panics on invalid input. It is
// intended for use with hard-coded test fixtures only.
func mustDecodeHex(v string) []byte {
	decoded, err := hex.DecodeString(v)
	if err == nil {
		return decoded
	}
	panic(fmt.Errorf("invalid hex value: %v", err))
}

View File

@ -0,0 +1,90 @@
// +build linux freebsd openbsd netbsd windows darwin
package file
import (
"syscall"
"time"
"github.com/fsnotify/fsnotify"
"github.com/elastic/beats/libbeat/logp"
)
// reader implements EventProducer using fsnotify file system notifications.
type reader struct {
	watcher *fsnotify.Watcher // Underlying fsnotify watcher.
	config  Config            // Metricset configuration.
	eventC  chan Event        // Channel on which events are delivered (buffer of 1).
}

// NewEventReader creates a new EventProducer backed by fsnotify.
func NewEventReader(c Config) (EventProducer, error) {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return nil, err
	}

	return &reader{
		watcher: watcher,
		config:  c,
		eventC:  make(chan Event, 1),
	}, nil
}
// Start adds the configured paths to the watcher and returns the channel on
// which events are delivered. Paths that cannot be watched are logged as
// warnings and skipped (EMFILE gets a hint about the open-file limit).
// Watching stops — and both the watcher and the event channel are closed —
// when done is closed.
func (r *reader) Start(done <-chan struct{}) (<-chan Event, error) {
	for _, p := range r.config.Paths {
		if err := r.watcher.Add(p); err != nil {
			if err == syscall.EMFILE {
				logp.Warn("%v Failed to watch %v: %v (check the max number of "+
					"open files allowed with 'ulimit -a')", logPrefix, p, err)
			} else {
				logp.Warn("%v Failed to watch %v: %v", logPrefix, p, err)
			}
		}
	}

	go r.consumeEvents(done)
	return r.eventC, nil
}

// consumeEvents translates raw fsnotify notifications into Events and
// forwards them on r.eventC until done is closed.
//
// Fix: done was previously ignored, so this goroutine — and the fsnotify
// watcher it owns — leaked after the caller stopped consuming. The loop now
// terminates when done is closed, which also runs the deferred
// watcher.Close and close(r.eventC).
func (r *reader) consumeEvents(done <-chan struct{}) {
	defer close(r.eventC)
	defer r.watcher.Close()

	for {
		select {
		case <-done:
			return
		case event := <-r.watcher.Events:
			// fsnotify may deliver a zero-value event; ignore it.
			if event.Name == "" {
				continue
			}
			debugf("Received fsnotify event: path=%v action=%v",
				event.Name, event.Op.String())

			start := time.Now()
			e := NewEvent(event.Name, opToAction(event.Op), SourceFSNotify,
				r.config.MaxFileSizeBytes, r.config.HashTypes)
			e.rtt = time.Since(start)

			// Don't block on a consumer that has already shut down.
			select {
			case r.eventC <- e:
			case <-done:
				return
			}
		case err := <-r.watcher.Errors:
			logp.Warn("%v fsnotify watcher error: %v", logPrefix, err)
		}
	}
}
// opToAction converts an fsnotify operation to an Action.
//
// fsnotify.Op is a bit set and a single event may carry more than one flag
// (e.g. Create|Write), so the flags are tested individually in priority
// order. The previous exact-match switch returned 0 for any combined op.
// Behavior for single-flag ops is unchanged.
func opToAction(op fsnotify.Op) Action {
	switch {
	case op&fsnotify.Create != 0:
		return Created
	case op&fsnotify.Write != 0:
		return Updated
	case op&fsnotify.Remove != 0:
		return Deleted
	case op&fsnotify.Rename != 0:
		return Moved
	case op&fsnotify.Chmod != 0:
		return AttributesModified
	default:
		return 0
	}
}

View File

@ -0,0 +1,317 @@
package file
import (
"io/ioutil"
"os"
"path/filepath"
"runtime"
"syscall"
"testing"
"time"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
// ErrorSharingViolation is a Windows ERROR_SHARING_VIOLATION. It means "The
// process cannot access the file because it is being used by another process."
// It is compared against os.LinkError values in rename below.
const ErrorSharingViolation syscall.Errno = 32
// TestEventReader drives the fsnotify-backed EventProducer through a
// sequence of file operations in a temp dir (create, rename, chmod, append,
// chown, delete, mkdir, move-in, move-out) and checks the emitted events.
// Sub-tests run via mustRun so a failure aborts the remaining sequence.
func TestEventReader(t *testing.T) {
	// Make dir to monitor.
	dir, err := ioutil.TempDir("", "audit")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// Create a new EventProducer.
	config := defaultConfig
	config.Paths = []string{dir}
	r, err := NewEventReader(config)
	if err != nil {
		t.Fatal(err)
	}
	done := make(chan struct{})
	defer close(done)
	events, err := r.Start(done)
	if err != nil {
		t.Fatal(err)
	}

	// Create a new file.
	txt1 := filepath.Join(dir, "test1.txt")
	var fileMode os.FileMode = 0640
	mustRun(t, "created", func(t *testing.T) {
		if err = ioutil.WriteFile(txt1, []byte("hello"), fileMode); err != nil {
			t.Fatal(err)
		}

		event := readTimeout(t, events)
		assert.EqualValues(t, Created, event.Action)
		assertSameFile(t, txt1, event.Path)
		if runtime.GOOS != "windows" {
			// Windows doesn't report POSIX permission bits.
			assert.EqualValues(t, fileMode, event.Info.Mode)
		}
	})

	// Rename the file. A rename may surface as several events, so accept
	// any mix of Moved/Updated on the old path and Created on the new one.
	txt2 := filepath.Join(dir, "test2.txt")
	mustRun(t, "move", func(t *testing.T) {
		rename(t, txt1, txt2)

		received := readMax(t, 3, events)
		if len(received) == 0 {
			t.Fatal("no events received")
		}
		for _, e := range received {
			switch e.Action {
			case Moved, Updated:
				assert.Equal(t, txt1, e.Path)
			case Created:
				assertSameFile(t, txt2, e.Path)
			default:
				t.Errorf("unexpected event: %+v", e)
			}
		}
	})

	// Chmod the file.
	mustRun(t, "attributes modified", func(t *testing.T) {
		if runtime.GOOS == "windows" {
			t.Skip()
		}

		if err = os.Chmod(txt2, 0644); err != nil {
			t.Fatal(err)
		}

		event := readTimeout(t, events)
		assertSameFile(t, txt2, event.Path)
		assert.EqualValues(t, AttributesModified, event.Action)
		assert.EqualValues(t, 0644, event.Info.Mode)
	})

	// Append data to the file.
	mustRun(t, "updated", func(t *testing.T) {
		f, err := os.OpenFile(txt2, os.O_RDWR|os.O_APPEND, fileMode)
		if err != nil {
			t.Fatal(err)
		}
		f.WriteString(" world!")
		f.Sync()
		f.Close()

		event := readTimeout(t, events)
		assertSameFile(t, txt2, event.Path)
		assert.EqualValues(t, Updated, event.Action)
		if runtime.GOOS != "windows" {
			assert.EqualValues(t, 0644, event.Info.Mode)
		}
	})

	// Change the GID of the file.
	mustRun(t, "chown", func(t *testing.T) {
		if runtime.GOOS == "windows" {
			t.Skip("skip chown on windows")
		}

		gid := changeGID(t, txt2)

		event := readTimeout(t, events)
		assertSameFile(t, txt2, event.Path)
		assert.EqualValues(t, AttributesModified, event.Action)
		assert.EqualValues(t, gid, event.Info.GID)
	})

	mustRun(t, "deleted", func(t *testing.T) {
		if err = os.Remove(txt2); err != nil {
			t.Fatal(err)
		}

		event := readTimeout(t, events)
		assert.EqualValues(t, Deleted, event.Action)
	})

	// Create a sub-directory.
	subDir := filepath.Join(dir, "subdir")
	mustRun(t, "dir created", func(t *testing.T) {
		if err = os.Mkdir(subDir, 0755); err != nil {
			t.Fatal(err)
		}

		event := readTimeout(t, events)
		assertSameFile(t, subDir, event.Path)
	})

	// Test moving a file into the monitored dir from outside.
	var moveInOrig string
	moveIn := filepath.Join(dir, "test3.txt")
	mustRun(t, "move in", func(t *testing.T) {
		f, err := ioutil.TempFile("", "test3.txt")
		if err != nil {
			t.Fatal(err)
		}
		f.WriteString("move-in")
		f.Sync()
		f.Close()
		moveInOrig = f.Name()

		rename(t, moveInOrig, moveIn)

		event := readTimeout(t, events)
		assert.EqualValues(t, Created, event.Action)
		assertSameFile(t, moveIn, event.Path)
	})

	// Test moving a file out of the monitored dir.
	mustRun(t, "move out", func(t *testing.T) {
		rename(t, moveIn, moveInOrig)
		defer os.Remove(moveInOrig)

		event := readTimeout(t, events)
		assertSameFile(t, moveIn, event.Path)
		// Windows reports the move-out as a delete rather than a rename.
		if runtime.GOOS == "windows" {
			assert.EqualValues(t, Deleted, event.Action)
		} else {
			assert.EqualValues(t, Moved, event.Action)
		}
	})

	// Test that it does not monitor recursively.
	subFile := filepath.Join(subDir, "foo.txt")
	mustRun(t, "non-recursive", func(t *testing.T) {
		if err = ioutil.WriteFile(subFile, []byte("foo"), fileMode); err != nil {
			t.Fatal(err)
		}

		assertNoEvent(t, events)
	})
}
// readTimeout reads one event from the channel and returns it. If it does
// not receive an event after one second it will time-out and fail the test.
func readTimeout(t testing.TB, events <-chan Event) Event {
	select {
	case <-time.After(time.Second):
		t.Fatalf("%+v", errors.Errorf("timed-out waiting for event"))
	case e, ok := <-events:
		if !ok {
			t.Fatal("failed reading from event channel")
		}
		t.Logf("%+v", buildMapStr(&e).StringToPrint())
		return e
	}

	// Unreachable (t.Fatal does not return), but required by the compiler.
	return Event{}
}

// readMax reads events from the channel over a period of one second and returns
// the events. If the max number of events is received it returns early.
func readMax(t testing.TB, max int, events <-chan Event) []Event {
	timer := time.NewTimer(time.Second)
	defer timer.Stop()

	var received []Event
	for {
		select {
		case <-timer.C:
			return received
		case e, ok := <-events:
			if !ok {
				t.Fatal("failed reading from event channel")
			}
			t.Logf("%+v", buildMapStr(&e).StringToPrint())
			received = append(received, e)
			if len(received) >= max {
				return received
			}
		}
	}
}

// assertNoEvent asserts that no event is received on the channel. It waits for
// 250ms.
func assertNoEvent(t testing.TB, events <-chan Event) {
	select {
	case e := <-events:
		t.Fatal("received unexpected event", e)
	case <-time.After(250 * time.Millisecond):
	}
}
// assertSameFile asserts that two files are the same.
func assertSameFile(t testing.TB, f1, f2 string) {
	if f1 == f2 {
		return
	}

	info1, err := os.Lstat(f1)
	if err != nil {
		t.Error(err)
		return
	}

	info2, err := os.Lstat(f2)
	if err != nil {
		t.Error(err)
		return
	}

	assert.True(t, os.SameFile(info1, info2), "%v and %v are not the same file", f1, f2)
}

// changeGID changes the GID of a file using chown. It uses the second group
// that the user is a member of. If the user is only a member of one group then
// it will skip the test.
func changeGID(t testing.TB, file string) int {
	groups, err := os.Getgroups()
	if err != nil {
		t.Fatal("failed to get groups", err)
	}

	if len(groups) <= 1 {
		t.Skip("no group that we can change to")
	}

	// The second one will be a non-default group.
	gid := groups[1]
	if err = os.Chown(file, -1, gid); err != nil {
		t.Fatal(err)
	}

	return gid
}

// mustRun runs a sub-test and stops the execution of the parent if the sub-test
// fails.
func mustRun(t *testing.T, name string, f func(t *testing.T)) {
	if !t.Run(name, f) {
		t.FailNow()
	}
}

// rename renames a file or it fails the test. It retries the rename operation
// multiple times before failing.
//
// https://support.microsoft.com/en-us/help/316609/prb-error-sharing-violation-error-message-when-the-createfile-function
func rename(t *testing.T, oldPath, newPath string) {
	const maxRetries = 100

	for retries := 0; retries < maxRetries; retries++ {
		err := os.Rename(oldPath, newPath)
		if err == nil {
			if retries > 0 {
				t.Logf("rename needed %d retries", retries)
			}
			return
		}

		// Retry on Windows sharing violations; any other error is fatal.
		if linkErr, ok := err.(*os.LinkError); ok && linkErr.Err == ErrorSharingViolation {
			time.Sleep(time.Millisecond)
			continue
		}

		t.Fatal(err)
	}
}

View File

@ -0,0 +1,9 @@
// +build !linux,!freebsd,!openbsd,!netbsd,!windows,!darwin
package file
import "github.com/pkg/errors"
// NewEventReader always returns an error because the file auditing metricset
// is not implemented on this platform (see the build constraints above).
func NewEventReader(c Config) (EventProducer, error) {
	// Fix: the original returned only errors.New(...), a single value for a
	// two-value (EventProducer, error) signature, which does not compile.
	return nil, errors.New("file auditing metricset is not implemented on this system")
}

View File

@ -0,0 +1,14 @@
// +build freebsd openbsd netbsd darwin
package file
import (
"syscall"
"time"
)
// fileTimes returns the access, modification, and change times from the
// syscall.Stat_t, each converted to UTC.
func fileTimes(stat *syscall.Stat_t) (atime, mtime, ctime time.Time) {
	// Fix: ctime was previously built from stat.Mtimespec (copy-paste from
	// the mtime line), so the reported change time was the modification
	// time. Use Ctimespec, matching the Linux variant's use of Ctim.
	return time.Unix(0, stat.Atimespec.Nano()).UTC(),
		time.Unix(0, stat.Mtimespec.Nano()).UTC(),
		time.Unix(0, stat.Ctimespec.Nano()).UTC()
}

View File

@ -0,0 +1,14 @@
// +build linux
package file
import (
"syscall"
"time"
)
// fileTimes returns the access, modification, and change times from the
// syscall.Stat_t, each converted to UTC.
func fileTimes(stat *syscall.Stat_t) (atime, mtime, ctime time.Time) {
	atime = time.Unix(0, stat.Atim.Nano()).UTC()
	mtime = time.Unix(0, stat.Mtim.Nano()).UTC()
	ctime = time.Unix(0, stat.Ctim.Nano()).UTC()
	return atime, mtime, ctime
}

View File

@ -0,0 +1,59 @@
// +build linux freebsd openbsd netbsd darwin
package file
import (
"os"
"os/user"
"strconv"
"syscall"
"github.com/joeshaw/multierror"
"github.com/pkg/errors"
)
// NewMetadata returns a new Metadata object. If an error is returned it is
// still possible for a non-nil Metadata object to be returned (possibly with
// less data populated).
func NewMetadata(path string, info os.FileInfo) (*Metadata, error) {
	stat, ok := info.Sys().(*syscall.Stat_t)
	if !ok {
		return nil, errors.Errorf("unexpected fileinfo sys type %T for %v", info.Sys(), path)
	}

	fileInfo := &Metadata{
		Inode: stat.Ino,
		UID:   stat.Uid,
		GID:   stat.Gid,
		Mode:  info.Mode().Perm(), // Permission bits only; type bits are in Type.
		Size:  uint64(info.Size()),
	}
	// atime is discarded; only mtime and ctime are kept.
	_, fileInfo.MTime, fileInfo.CTime = fileTimes(stat)

	switch {
	case info.Mode().IsRegular():
		fileInfo.Type = FileType
	case info.IsDir():
		fileInfo.Type = DirType
	case info.Mode()&os.ModeSymlink > 0:
		fileInfo.Type = SymlinkType
	}

	// Resolve the UID and GID to owner and group names. Lookup failures are
	// collected but do not prevent returning the metadata.
	var errs multierror.Errors
	owner, err := user.LookupId(strconv.Itoa(int(fileInfo.UID)))
	if err != nil {
		errs = append(errs, err)
	} else {
		fileInfo.Owner = owner.Username
	}

	group, err := user.LookupGroupId(strconv.Itoa(int(fileInfo.GID)))
	if err != nil {
		errs = append(errs, err)
	} else {
		fileInfo.Group = group.Name
	}

	return fileInfo, errs.Err()
}

View File

@ -0,0 +1,78 @@
package file
import (
"io/ioutil"
"os"
"os/user"
"runtime"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
)
// TestNewMetadata creates a temp file and verifies the metadata collected
// for it: ownership (SID on Windows, uid/gid/owner/group elsewhere), size,
// times, and type.
func TestNewMetadata(t *testing.T) {
	f, err := ioutil.TempFile("", "metadata")
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(f.Name())

	_, err = f.WriteString("metadata test")
	if err != nil {
		t.Fatal(err)
	}
	f.Sync()
	f.Close()

	info, err := os.Lstat(f.Name())
	if err != nil {
		t.Fatal(err)
	}

	meta, err := NewMetadata(f.Name(), info)
	if err != nil {
		t.Fatal(err)
	}

	u, err := user.Current()
	if err != nil {
		t.Fatal(err)
	}

	assert.NotZero(t, meta.Inode)
	if runtime.GOOS == "windows" {
		// The owner can differ from the creator if the GPO for
		// "System object Default owner for objects created by members of the Administrators group"
		// is set to "administrators group" rather than "object creator".
		if meta.Owner == u.Username {
			assert.Equal(t, u.Uid, meta.SID)
		} else if meta.Owner == `BUILTIN\Administrators` {
			// Well-known SID for BUILTIN_ADMINISTRATORS.
			assert.Equal(t, "S-1-5-32-544", meta.SID)
		} else {
			t.Error("unexpected owner", meta.Owner)
		}
		// POSIX-only fields stay zero on Windows.
		assert.Zero(t, meta.UID)
		assert.Zero(t, meta.GID)
		assert.Empty(t, meta.Group)
	} else {
		group, err := user.LookupGroupId(u.Gid)
		if err != nil {
			t.Fatal(err)
		}
		assert.Equal(t, u.Uid, strconv.Itoa(int(meta.UID)))
		assert.Equal(t, u.Gid, strconv.Itoa(int(meta.GID)))
		assert.Equal(t, u.Username, meta.Owner)
		assert.Equal(t, group.Name, meta.Group)
		assert.Empty(t, meta.SID)
		// ioutil.TempFile creates files with mode 0600.
		assert.EqualValues(t, 0600, meta.Mode)
	}
	assert.EqualValues(t, len("metadata test"), meta.Size, "size")
	assert.NotZero(t, meta.MTime, "mtime")
	assert.NotZero(t, meta.CTime, "ctime")
	assert.Equal(t, FileType, meta.Type, "type")
}

View File

@ -0,0 +1,88 @@
// +build windows
package file
import (
"fmt"
"os"
"syscall"
"time"
"unsafe"
"github.com/joeshaw/multierror"
"github.com/pkg/errors"
"github.com/elastic/beats/filebeat/input/file"
)
// NewMetadata returns a new Metadata object. If an error is returned it is
// still possible for a non-nil Metadata object to be returned (possibly with
// less data populated).
func NewMetadata(path string, info os.FileInfo) (*Metadata, error) {
	attrs, ok := info.Sys().(*syscall.Win32FileAttributeData)
	if !ok {
		return nil, errors.Errorf("unexpected fileinfo sys type %T for %v", info.Sys(), path)
	}

	state := file.GetOSState(info)

	fileInfo := &Metadata{
		// NOTE(review): if state.IdxHi is a 32-bit value this shift
		// overflows before the uint64 conversion; the intended form would
		// be uint64(state.IdxHi)<<32 + uint64(state.IdxLo). Confirm the
		// type of file.StateOS's index fields.
		Inode: uint64(state.IdxHi<<32 + state.IdxLo),
		Mode:  info.Mode(),
		Size:  uint64(info.Size()),
		MTime: time.Unix(0, attrs.LastWriteTime.Nanoseconds()).UTC(),
		CTime: time.Unix(0, attrs.CreationTime.Nanoseconds()).UTC(),
	}

	switch {
	case info.Mode().IsRegular():
		fileInfo.Type = FileType
	case info.IsDir():
		fileInfo.Type = DirType
	case info.Mode()&os.ModeSymlink > 0:
		fileInfo.Type = SymlinkType
	}

	// fileOwner only works on files or symlinks to file because os.Open only
	// works on files. To open a dir we need to use CreateFile with the
	// FILE_FLAG_BACKUP_SEMANTICS flag.
	var err error
	if !info.IsDir() {
		fileInfo.SID, fileInfo.Owner, err = fileOwner(path)
	}
	return fileInfo, err
}
// fileOwner returns the SID and name (domain\user) of the file's owner.
func fileOwner(path string) (sid, owner string, err error) {
    f, err := os.Open(path)
    if err != nil {
        return "", "", errors.Wrap(err, "failed to open file to get owner")
    }
    defer f.Close()
    var securityID *syscall.SID
    var securityDescriptor *SecurityDescriptor
    if err = GetSecurityInfo(syscall.Handle(f.Fd()), FileObject,
        OwnerSecurityInformation, &securityID, nil, nil, nil, &securityDescriptor); err != nil {
        return "", "", errors.Wrapf(err, "failed on GetSecurityInfo for %v", path)
    }
    // The returned security descriptor is OS-allocated and must be freed.
    defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(securityDescriptor)))
    // Convert SID to a string and lookup the username. Both steps are
    // best-effort; failures are accumulated and returned together.
    var errs multierror.Errors
    sid, err = securityID.String()
    if err != nil {
        errs = append(errs, err)
    }
    account, domain, _, err := securityID.LookupAccount("")
    if err != nil {
        errs = append(errs, err)
    } else {
        owner = fmt.Sprintf(`%s\%s`, domain, account)
    }
    return sid, owner, errs.Err()
}

View File

@ -0,0 +1,318 @@
package file
import (
"os"
"sync"
"time"
"github.com/google/flatbuffers/go"
"github.com/pkg/errors"
"github.com/elastic/beats/auditbeat/module/audit/file/schema"
)
// Requires the Google flatbuffer compiler.
//go:generate flatc --go schema.fbs

// bufferPool recycles flatbuffer Builders to avoid re-allocating the
// encoding buffer for every event.
var bufferPool sync.Pool

func init() {
    bufferPool.New = func() interface{} {
        return flatbuffers.NewBuilder(1024)
    }
}
// fbGetBuilder returns a Builder that can be used for encoding data. The builder
// should be released by invoking the release function after the encoded bytes
// are no longer in use (i.e. a copy of b.FinishedBytes() has been made).
func fbGetBuilder() (b *flatbuffers.Builder, release func()) {
    b = bufferPool.Get().(*flatbuffers.Builder)
    b.Reset()
    return b, func() { bufferPool.Put(b) }
}
// fbEncodeEvent encodes the given Event to a flatbuffer. The returned bytes
// are a pointer into the Builder's memory.
func fbEncodeEvent(b *flatbuffers.Builder, e *Event) []byte {
    if e == nil {
        return nil
    }
    b.Finish(fbWriteEvent(b, e))
    return b.FinishedBytes()
}
// fbWriteHash writes the given hash digests into the Builder and returns the
// offset of the resulting Hash table, or 0 when there are no hashes.
func fbWriteHash(b *flatbuffers.Builder, hashes map[HashType][]byte) flatbuffers.UOffsetT {
    if len(hashes) == 0 {
        return 0
    }
    // Byte vectors must be created before the table itself is started.
    offsets := make(map[HashType]flatbuffers.UOffsetT, len(hashes))
    for name, value := range hashes {
        offsets[name] = b.CreateByteVector(value)
    }
    schema.HashStart(b)
    for hashType, offset := range offsets {
        // Map each hash type to its dedicated field in the schema table.
        switch hashType {
        case MD5:
            schema.HashAddMd5(b, offset)
        case SHA1:
            schema.HashAddSha1(b, offset)
        case SHA224:
            schema.HashAddSha224(b, offset)
        case SHA256:
            schema.HashAddSha256(b, offset)
        case SHA384:
            schema.HashAddSha384(b, offset)
        case SHA3_224:
            schema.HashAddSha3224(b, offset)
        case SHA3_256:
            schema.HashAddSha3256(b, offset)
        case SHA3_384:
            schema.HashAddSha3384(b, offset)
        case SHA3_512:
            schema.HashAddSha3512(b, offset)
        case SHA512:
            schema.HashAddSha512(b, offset)
        case SHA512_224:
            schema.HashAddSha512224(b, offset)
        case SHA512_256:
            schema.HashAddSha512256(b, offset)
        }
    }
    return schema.HashEnd(b)
}
// fbWriteMetadata writes the Metadata into the Builder and returns the offset
// of the resulting Metadata table, or 0 when m is nil.
func fbWriteMetadata(b *flatbuffers.Builder, m *Metadata) flatbuffers.UOffsetT {
    if m == nil {
        return 0
    }
    // Strings must be created before the table itself is started.
    var sidOffset flatbuffers.UOffsetT
    if m.SID != "" {
        sidOffset = b.CreateString(m.SID)
    }
    schema.MetadataStart(b)
    schema.MetadataAddInode(b, m.Inode)
    schema.MetadataAddUid(b, m.UID)
    schema.MetadataAddGid(b, m.GID)
    if sidOffset > 0 {
        schema.MetadataAddSid(b, sidOffset)
    }
    schema.MetadataAddMode(b, uint32(m.Mode))
    switch m.Type {
    case UnknownType:
        schema.MetadataAddType(b, schema.TypeUnknown)
    case FileType:
        schema.MetadataAddType(b, schema.TypeFile)
        // This info is only used for files.
        schema.MetadataAddSize(b, m.Size)
        schema.MetadataAddMtimeNs(b, m.MTime.UnixNano())
        schema.MetadataAddCtimeNs(b, m.CTime.UnixNano())
    case DirType:
        schema.MetadataAddType(b, schema.TypeDir)
    case SymlinkType:
        schema.MetadataAddType(b, schema.TypeSymlink)
    }
    return schema.MetadataEnd(b)
}
// fbWriteEvent writes the Event (including its nested Metadata and Hash
// tables) into the Builder and returns the offset of the Event table, or 0
// when e is nil.
func fbWriteEvent(b *flatbuffers.Builder, e *Event) flatbuffers.UOffsetT {
    if e == nil {
        return 0
    }
    // Nested tables and strings must be written before EventStart.
    hashesOffset := fbWriteHash(b, e.Hashes)
    metadataOffset := fbWriteMetadata(b, e.Info)
    var targetPathOffset flatbuffers.UOffsetT
    if e.TargetPath != "" {
        targetPathOffset = b.CreateString(e.TargetPath)
    }
    schema.EventStart(b)
    schema.EventAddTimestampNs(b, e.Timestamp.UnixNano())
    switch e.Source {
    case SourceFSNotify:
        schema.EventAddSource(b, schema.SourceFSNotify)
    case SourceScan:
        schema.EventAddSource(b, schema.SourceScan)
    }
    if targetPathOffset > 0 {
        schema.EventAddTargetPath(b, targetPathOffset)
    }
    switch e.Action {
    case AttributesModified:
        schema.EventAddAction(b, schema.ActionAttributesModified)
    case Created:
        schema.EventAddAction(b, schema.ActionCreated)
    case Deleted:
        schema.EventAddAction(b, schema.ActionDeleted)
    case Updated:
        schema.EventAddAction(b, schema.ActionUpdated)
    case Moved:
        schema.EventAddAction(b, schema.ActionMoved)
    case ConfigChange:
        schema.EventAddAction(b, schema.ActionConfigChanged)
    }
    if metadataOffset > 0 {
        schema.EventAddInfo(b, metadataOffset)
    }
    if hashesOffset > 0 {
        schema.EventAddHashes(b, hashesOffset)
    }
    return schema.EventEnd(b)
}
// fbDecodeEvent decodes flatbuffer event data and copies it into an Event
// object that is returned. The path is not stored in the flatbuffer (it is
// the datastore key), so the caller supplies it.
func fbDecodeEvent(path string, buf []byte) *Event {
    e := schema.GetRootAsEvent(buf, 0)
    rtn := &Event{
        Timestamp:  time.Unix(0, e.TimestampNs()).UTC(),
        Path:       path,
        TargetPath: string(e.TargetPath()),
    }
    switch e.Source() {
    case schema.SourceScan:
        rtn.Source = SourceScan
    case schema.SourceFSNotify:
        rtn.Source = SourceFSNotify
    }
    switch e.Action() {
    case schema.ActionAttributesModified:
        rtn.Action = AttributesModified
    case schema.ActionCreated:
        rtn.Action = Created
    case schema.ActionDeleted:
        rtn.Action = Deleted
    case schema.ActionUpdated:
        rtn.Action = Updated
    case schema.ActionMoved:
        rtn.Action = Moved
    case schema.ActionConfigChanged:
        rtn.Action = ConfigChange
    }
    rtn.Info = fbDecodeMetadata(e)
    rtn.Hashes = fbDecodeHash(e)
    return rtn
}
// fbDecodeMetadata copies the Metadata sub-table out of the encoded event.
// It returns nil when the event carries no metadata.
func fbDecodeMetadata(e *schema.Event) *Metadata {
    info := e.Info(nil)
    if info == nil {
        return nil
    }
    rtn := &Metadata{
        Inode: info.Inode(),
        UID:   info.Uid(),
        GID:   info.Gid(),
        SID:   string(info.Sid()),
        Mode:  os.FileMode(info.Mode()),
        Size:  info.Size(),
        MTime: time.Unix(0, info.MtimeNs()).UTC(),
        CTime: time.Unix(0, info.CtimeNs()).UTC(),
    }
    switch info.Type() {
    case schema.TypeFile:
        rtn.Type = FileType
    case schema.TypeDir:
        rtn.Type = DirType
    case schema.TypeSymlink:
        rtn.Type = SymlinkType
    default:
        rtn.Type = UnknownType
    }
    return rtn
}
// fbDecodeHash copies the Hash sub-table out of the encoded event into a map
// of hash type to digest bytes. It returns nil when the event has no hashes.
func fbDecodeHash(e *schema.Event) map[HashType][]byte {
    hash := e.Hashes(nil)
    if hash == nil {
        return nil
    }
    rtn := map[HashType][]byte{}
    for _, hashType := range validHashes {
        // For each known hash, pair the vector-length accessor with the
        // per-index byte accessor generated by flatc.
        var length int
        var producer func(i int) int8
        switch hashType {
        case MD5:
            length = hash.Md5Length()
            producer = hash.Md5
        case SHA1:
            length = hash.Sha1Length()
            producer = hash.Sha1
        case SHA224:
            length = hash.Sha224Length()
            producer = hash.Sha224
        case SHA256:
            length = hash.Sha256Length()
            producer = hash.Sha256
        case SHA384:
            length = hash.Sha384Length()
            producer = hash.Sha384
        case SHA3_224:
            length = hash.Sha3224Length()
            producer = hash.Sha3224
        case SHA3_256:
            length = hash.Sha3256Length()
            producer = hash.Sha3256
        case SHA3_384:
            length = hash.Sha3384Length()
            producer = hash.Sha3384
        case SHA3_512:
            length = hash.Sha3512Length()
            producer = hash.Sha3512
        case SHA512:
            length = hash.Sha512Length()
            producer = hash.Sha512
        case SHA512_224:
            length = hash.Sha512224Length()
            producer = hash.Sha512224
        case SHA512_256:
            length = hash.Sha512256Length()
            producer = hash.Sha512256
        default:
            panic(errors.Errorf("unhandled hash type: %v", hashType))
        }
        if length > 0 {
            // Copy byte-by-byte; the schema stores digests as [byte] (int8).
            hashValue := make([]byte, length)
            for i := 0; i < len(hashValue); i++ {
                hashValue[i] = byte(producer(i))
            }
            rtn[hashType] = hashValue
        }
    }
    return rtn
}
// fbIsEventTimestampBefore returns true if the event's timestamp is before
// the given ts. This convenience function allows you to compare the event's
// timestamp without fully decoding and copying the flatbuffer event data.
func fbIsEventTimestampBefore(buf []byte, ts time.Time) bool {
    ns := schema.GetRootAsEvent(buf, 0).TimestampNs()
    return time.Unix(0, ns).Before(ts)
}

View File

@ -0,0 +1,83 @@
package file
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
)
// TestFBEncodeDecode round-trips an Event through the flatbuffer encoder and
// decoder and verifies the result is equal to the original.
func TestFBEncodeDecode(t *testing.T) {
    e := testEvent()
    builder, release := fbGetBuilder()
    defer release()
    data := fbEncodeEvent(builder, e)
    t.Log("encoded length:", len(data))
    out := fbDecodeEvent(e.Path, data)
    if out == nil {
        t.Fatal("decode returned nil")
    }
    // Compare the pointer-valued Info field by value first, then null it out
    // so the remaining fields can be compared directly.
    assert.Equal(t, *e.Info, *out.Info)
    e.Info, out.Info = nil, nil
    assert.Equal(t, e, out)
}
// BenchmarkFBEncodeEvent measures flatbuffer encoding of a single event
// using a reused Builder.
func BenchmarkFBEncodeEvent(b *testing.B) {
    builder, release := fbGetBuilder()
    defer release()
    e := testEvent()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        builder.Reset()
        fbEncodeEvent(builder, e)
    }
}
// BenchmarkFBEventDecode measures flatbuffer decoding of a single
// pre-encoded event.
func BenchmarkFBEventDecode(b *testing.B) {
    builder, release := fbGetBuilder()
    defer release()
    e := testEvent()
    data := fbEncodeEvent(builder, e)
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        if event := fbDecodeEvent(e.Path, data); event == nil {
            b.Fatal("failed to decode")
        }
    }
}
// JSON benchmarks for comparisons.

// BenchmarkJSONEventEncoding measures encoding the same event with
// encoding/json, as a baseline against the flatbuffer encoder.
func BenchmarkJSONEventEncoding(b *testing.B) {
    e := testEvent()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        _, err := json.Marshal(e)
        if err != nil {
            b.Fatal(err)
        }
    }
}
// BenchmarkJSONEventDecode measures decoding the same event with
// encoding/json, as a baseline against the flatbuffer decoder.
func BenchmarkJSONEventDecode(b *testing.B) {
    e := testEvent()
    data, err := json.Marshal(e)
    if err != nil {
        b.Fatal(err)
    }
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        var e *Event
        err := json.Unmarshal(data, &e)
        if err != nil {
            b.Fatal(err)
        }
    }
}

View File

@ -0,0 +1,296 @@
package file
import (
"bytes"
"os"
"time"
"github.com/boltdb/bolt"
"github.com/pkg/errors"
"github.com/elastic/beats/auditbeat/datastore"
"github.com/elastic/beats/libbeat/common/cfgwarn"
"github.com/elastic/beats/libbeat/logp"
"github.com/elastic/beats/metricbeat/mb"
"github.com/elastic/beats/metricbeat/mb/parse"
)
const (
    metricsetName = "audit.file"
    logPrefix     = "[" + metricsetName + "]"
    // bucketName carries a version suffix so the persisted format can be
    // changed by bumping the bucket name.
    bucketName = metricsetName + ".v1"
)

var (
    debugf = logp.MakeDebug(metricsetName)
)

// init registers the audit/file metricset with the metricbeat registry.
func init() {
    if err := mb.Registry.AddMetricSet("audit", "file", New, parse.EmptyHostParser); err != nil {
        panic(err)
    }
}
// EventProducer produces events.
type EventProducer interface {
// Start starts the event producer and writes events to the returned
// channel. When the producer is finished it will close the returned
// channel. If the returned event channel is not drained the producer will
// block (possibly causing data loss). The producer can be stopped
// prematurely by closing the provided done channel. An error is returned
// if the producer fails to start.
Start(done <-chan struct{}) (<-chan Event, error)
}
// MetricSet for monitoring file integrity.
type MetricSet struct {
    mb.BaseMetricSet
    config  Config
    reader  EventProducer // fsnotify-based event producer.
    scanner EventProducer // Filesystem scanner; nil unless ScanAtStart is enabled.

    // Runtime params that are initialized on Run().
    bucket       datastore.BoltBucket // Persistent store of last-seen file state.
    scanStart    time.Time            // Scan start time; entries older than this are purged.
    scanChan     <-chan Event         // Events from the scanner.
    fsnotifyChan <-chan Event         // Events from the fsnotify reader.
}
// New returns a new file.MetricSet. It unpacks the module configuration,
// creates the fsnotify event reader, and (when ScanAtStart is enabled) the
// filesystem scanner.
func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
    cfgwarn.Experimental("The %v metricset is an experimental feature", metricsetName)
    config := defaultConfig
    if err := base.Module().UnpackConfig(&config); err != nil {
        return nil, err
    }
    r, err := NewEventReader(config)
    if err != nil {
        return nil, errors.Wrap(err, "failed to initialize audit file event reader")
    }
    ms := &MetricSet{
        BaseMetricSet: base,
        config:        config,
        reader:        r,
    }
    // The scanner is optional; it is only created when ScanAtStart is set.
    if config.ScanAtStart {
        ms.scanner, err = NewFileSystemScanner(config)
        if err != nil {
            return nil, errors.Wrap(err, "failed to initialize audit file scanner")
        }
    }
    debugf("Initialized the audit file event reader. Running as euid=%v", os.Geteuid())
    return ms, nil
}
// Run runs the MetricSet. The method will not return control to the caller
// until it is finished (to stop it close the reporter.Done() channel).
func (ms *MetricSet) Run(reporter mb.PushReporter) {
    if !ms.init(reporter) {
        return
    }
    // Multiplex both producers until each has closed its channel. A channel
    // is set to nil once closed, which disables its select case.
    for ms.fsnotifyChan != nil || ms.scanChan != nil {
        select {
        case event, ok := <-ms.fsnotifyChan:
            if !ok {
                ms.fsnotifyChan = nil
                continue
            }
            ms.reportEvent(reporter, &event)
        case event, ok := <-ms.scanChan:
            if !ok {
                ms.scanChan = nil
                // When the scan completes purge datastore keys that no longer
                // exist on disk based on being older than scanStart.
                ms.purgeDeleted(reporter)
                continue
            }
            ms.reportEvent(reporter, &event)
        case <-reporter.Done():
            return
        }
    }
}
// Close cleans up the MetricSet when it finishes, releasing the persistent
// datastore bucket if one was opened.
func (ms *MetricSet) Close() error {
    if ms.bucket == nil {
        return nil
    }
    return ms.bucket.Close()
}
// init opens the persistent datastore bucket and starts the fsnotify reader
// and, if configured, the filesystem scanner. Failures are sent to the
// reporter and logged; it returns false when initialization failed.
func (ms *MetricSet) init(reporter mb.PushReporter) bool {
    bucket, err := datastore.OpenBucket(bucketName)
    if err != nil {
        err = errors.Wrap(err, "failed to open persistent datastore")
        reporter.Error(err)
        logp.Err("%v %v", logPrefix, err)
        return false
    }
    ms.bucket = bucket.(datastore.BoltBucket)
    // Both producers stop when the reporter's done channel is closed.
    ms.fsnotifyChan, err = ms.reader.Start(reporter.Done())
    if err != nil {
        err = errors.Wrap(err, "failed to start fsnotify event producer")
        reporter.Error(err)
        logp.Err("%v %v", logPrefix, err)
        return false
    }
    // Record the scan start time so stale datastore entries can be purged.
    ms.scanStart = time.Now().UTC()
    if ms.scanner != nil {
        ms.scanChan, err = ms.scanner.Start(reporter.Done())
        if err != nil {
            err = errors.Wrap(err, "failed to start file scanner")
            reporter.Error(err)
            logp.Err("%v %v", logPrefix, err)
            return false
        }
    }
    return true
}
// reportEvent publishes the event if the file has changed since the last
// persisted state, then updates the local datastore. It returns false when
// the reporter has stopped accepting events.
func (ms *MetricSet) reportEvent(reporter mb.PushReporter, event *Event) bool {
    if len(event.errors) > 0 && logp.IsDebug(metricsetName) {
        debugf("Errors on %v event for %v: %v",
            event.Action, event.Path, event.errors)
    }
    changed := ms.hasFileChangedSinceLastEvent(event)
    if changed {
        // Publish event if it changed.
        if ok := reporter.Event(buildMapStr(event)); !ok {
            return false
        }
    }
    // Persist event locally: remove the entry for deletions, otherwise store
    // the latest state (only when file metadata is available).
    if event.Action == Deleted {
        if err := ms.bucket.Delete(event.Path); err != nil {
            logp.Err("%v %v", logPrefix, err)
        }
    } else if event.Info != nil {
        if err := store(ms.bucket, event); err != nil {
            logp.Err("%v %v", logPrefix, err)
        }
    }
    return true
}
// hasFileChangedSinceLastEvent compares the event against the locally
// persisted state for the same path and returns true if the file changed
// (or if the prior state could not be loaded). As a side effect it sets
// event.Action from the diff when the producer did not provide one.
func (ms *MetricSet) hasFileChangedSinceLastEvent(event *Event) bool {
    // Load event from DB.
    lastEvent, err := load(ms.bucket, event.Path)
    if err != nil {
        logp.Warn("%v %v", logPrefix, err)
        // Treat unreadable state as changed so the event is not dropped.
        return true
    }
    action, changed := diffEvents(lastEvent, event)
    if event.Action == 0 {
        event.Action = action
    }
    if changed && logp.IsDebug(metricsetName) {
        debugf("file at %v has changed since last seen: old=%v, new=%v",
            event.Path, lastEvent, event)
    }
    return changed
}
// purgeDeleted removes datastore entries under the configured paths that
// were not refreshed by the scan (i.e. older than scanStart) and reports a
// synthesized Deleted event for each. These events are deliberately not
// persisted back to the datastore.
func (ms *MetricSet) purgeDeleted(reporter mb.PushReporter) {
    for _, prefix := range ms.config.Paths {
        deleted, err := purgeOlder(ms.bucket, ms.scanStart, prefix)
        if err != nil {
            logp.Err("%v %v", logPrefix, err)
            continue
        }
        for _, e := range deleted {
            // Don't persist!
            if !reporter.Event(buildMapStr(e)) {
                return
            }
        }
    }
}
// Datastore utility functions.

// purgeOlder does a prefix scan of the keys in the datastore and purges items
// older than the specified time. It returns a synthesized Deleted event for
// each purged key.
func purgeOlder(b datastore.BoltBucket, t time.Time, prefix string) ([]*Event, error) {
    var (
        deleted   []*Event
        totalKeys uint64
        p         = []byte(prefix)

        matchesPrefix = func(path []byte) bool {
            // XXX: This match may need to be smarter to accommodate multiple
            // metricset instances working on similar paths (e.g. /a and /a/b)
            // or when recursion is allowed.
            return bytes.HasPrefix(path, p)
        }
        startTime = time.Now()
    )
    err := b.Update(func(b *bolt.Bucket) error {
        // Seek to the first key with the prefix, then walk forward while the
        // prefix still matches.
        c := b.Cursor()
        for path, v := c.Seek(p); path != nil && matchesPrefix(path); path, v = c.Next() {
            totalKeys++
            // Compare timestamps without fully decoding the stored event.
            if fbIsEventTimestampBefore(v, t) {
                if err := c.Delete(); err != nil {
                    return err
                }
                deleted = append(deleted, &Event{
                    Timestamp: t,
                    Action:    Deleted,
                    Path:      string(path),
                })
            }
        }
        return nil
    })
    debugf("Purged %v of %v entries in %v for %v", len(deleted),
        totalKeys, time.Since(startTime), prefix)
    return deleted, err
}
// store stores an Event in the given Bucket, keyed by the event's path.
func store(b datastore.Bucket, e *Event) error {
    builder, release := fbGetBuilder()
    defer release()
    data := fbEncodeEvent(builder, e)
    if err := b.Store(e.Path, data); err != nil {
        return errors.Wrapf(err, "failed to locally store event for %v", e.Path)
    }
    return nil
}
// load loads an Event from the datastore. It returns a nil Event if the key
// was not found. It returns an error if there was a failure reading from the
// datastore or decoding the data.
func load(b datastore.Bucket, path string) (*Event, error) {
    var e *Event
    err := b.Load(path, func(blob []byte) error {
        // The callback is only invoked when the key exists.
        e = fbDecodeEvent(path, blob)
        return nil
    })
    if err != nil {
        return nil, errors.Wrapf(err, "failed to load locally persisted event for %v", path)
    }
    return e, nil
}

View File

@ -0,0 +1,107 @@
package file
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/elastic/beats/auditbeat/datastore"
"github.com/elastic/beats/libbeat/paths"
mbtest "github.com/elastic/beats/metricbeat/mb/testing"
)
// TestData generates a sample event by creating a file inside a watched
// temporary directory and writes the last received event to the metricset's
// sample data JSON.
func TestData(t *testing.T) {
    defer setup(t)()
    dir, err := ioutil.TempDir("", "audit-file")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(dir)
    go func() {
        // Delay the write briefly so the metricset is running before the
        // file appears.
        time.Sleep(100 * time.Millisecond)
        file := filepath.Join(dir, "file.data")
        ioutil.WriteFile(file, []byte("hello world"), 0600)
    }()
    ms := mbtest.NewPushMetricSet(t, getConfig(dir))
    events, errs := mbtest.RunPushMetricSet(time.Second, ms)
    if len(errs) > 0 {
        t.Fatalf("received errors: %+v", errs)
    }
    if len(events) == 0 {
        t.Fatal("received no events")
    }
    fullEvent := mbtest.CreateFullEvent(ms, events[len(events)-1])
    mbtest.WriteEventToDataJSON(t, fullEvent)
}
// getConfig builds a minimal module configuration that runs the audit file
// metricset against the given path.
func getConfig(path string) map[string]interface{} {
    cfg := make(map[string]interface{}, 3)
    cfg["module"] = "audit"
    cfg["metricsets"] = []string{"file"}
    cfg["file.paths"] = []string{path}
    return cfg
}
// TestDetectDeletedFiles seeds the datastore with an entry for a file that
// does not exist on disk and verifies the metricset reports a "deleted"
// event for it (after the "created" event for the watched directory).
func TestDetectDeletedFiles(t *testing.T) {
    defer setup(t)()
    bucket, err := datastore.OpenBucket(bucketName)
    if err != nil {
        t.Fatal(err)
    }
    defer bucket.Close()
    dir, err := ioutil.TempDir("", "audit-file")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(dir)
    // Resolve symlinks (e.g. temp dirs that are symlinked) so the stored
    // path matches the path the metricset reports.
    dir, err = filepath.EvalSymlinks(dir)
    if err != nil {
        t.Fatal(err)
    }
    // Persist state for a file that was never created on disk.
    e := &Event{
        Timestamp: time.Now().UTC(),
        Path:      filepath.Join(dir, "ghost.file"),
        Action:    Created,
    }
    if err = store(bucket, e); err != nil {
        t.Fatal(err)
    }
    ms := mbtest.NewPushMetricSet(t, getConfig(dir))
    events, errs := mbtest.RunPushMetricSet(time.Second, ms)
    if len(errs) > 0 {
        t.Fatalf("received errors: %+v", errs)
    }
    if !assert.Len(t, events, 2) {
        return
    }
    event := events[0]
    assert.Equal(t, dir, event["path"])
    assert.Equal(t, "created", event["action"])
    event = events[1]
    assert.Equal(t, e.Path, event["path"])
    assert.Equal(t, "deleted", event["action"])
}
// setup points path.data at a fresh temporary directory so the metricset's
// persistent datastore is written to a predictable, isolated location. It
// returns a cleanup function that removes the directory.
func setup(t testing.TB) func() {
    // path.data should be set so that the DB is written to a predictable location.
    var err error
    paths.Paths.Data, err = ioutil.TempDir("", "beat-data-dir")
    if err != nil {
        // Pass the error so the failure message explains why setup failed
        // (the original t.Fatal() discarded it).
        t.Fatal(err)
    }
    return func() { os.RemoveAll(paths.Paths.Data) }
}

View File

@ -0,0 +1,167 @@
package file
import (
"errors"
"fmt"
"math"
"os"
"path/filepath"
"strings"
"sync/atomic"
"time"
"github.com/dustin/go-humanize"
"github.com/juju/ratelimit"
"github.com/elastic/beats/libbeat/logp"
)
// scannerID is used as a global monotonically increasing counter for assigning
// a unique name to each scanner instance for logging purposes. Use
// atomic.AddUint32() to get a new value.
var scannerID uint32

// scanner is an EventProducer that traverses the configured paths once and
// emits one Event per filesystem entry visited.
type scanner struct {
    fileCount   uint64            // Number of entries visited (updated atomically).
    byteCount   uint64            // Total bytes of file data visited (updated atomically).
    tokenBucket *ratelimit.Bucket // Read/hash rate limiter; nil when unlimited.

    done   <-chan struct{} // Closed by the caller to stop the scan early.
    eventC chan Event      // Output channel; closed when the scan completes.

    logID     string // Unique ID to correlate log messages to a single instance.
    logPrefix string
    config    Config
}
// NewFileSystemScanner creates a new EventProducer instance that scans the
// configured file paths.
func NewFileSystemScanner(c Config) (EventProducer, error) {
    // Assign a unique, monotonically increasing ID for log correlation.
    id := fmt.Sprintf("[scanner-%v]", atomic.AddUint32(&scannerID, 1))
    s := &scanner{
        logID:     id,
        logPrefix: logPrefix + " " + id,
        config:    c,
        eventC:    make(chan Event, 1),
    }
    return s, nil
}
// Start starts the EventProducer. The provided done channel can be used to stop
// the EventProducer prematurely. The returned Event channel will be closed when
// scanning is complete. The channel must be drained otherwise the scanner will
// block.
func (s *scanner) Start(done <-chan struct{}) (<-chan Event, error) {
    s.done = done
    if s.config.ScanRateBytesPerSec > 0 {
        debugf("%v creating token bucket with rate %v/sec and capacity %v",
            s.logID, s.config.ScanRatePerSec,
            humanize.Bytes(s.config.MaxFileSizeBytes))
        // NOTE(review): the fill rate is half the configured byte rate while
        // the capacity equals the max file size -- presumably to permit a
        // burst up to one max-size file; confirm intent before changing.
        s.tokenBucket = ratelimit.NewBucketWithRate(
            float64(s.config.ScanRateBytesPerSec)/2., // Fill Rate
            int64(s.config.MaxFileSizeBytes)) // Max Capacity
        // Drain the bucket so the scan is throttled from the start.
        s.tokenBucket.TakeAvailable(math.MaxInt64)
    }
    go s.scan()
    return s.eventC, nil
}
// scan iterates over the configured paths and generates events for each file.
// It closes eventC when finished and logs summary statistics for the scan.
func (s *scanner) scan() {
    if logp.IsDebug(metricsetName) {
        debugf("%v File system scanner is starting for paths [%v].",
            s.logID, strings.Join(s.config.Paths, ", "))
        defer debugf("%v File system scanner is stopping.", s.logID)
    }
    defer close(s.eventC)
    startTime := time.Now()
    for _, path := range s.config.Paths {
        // Resolve symlinks to ensure we have an absolute path.
        evalPath, err := filepath.EvalSymlinks(path)
        if err != nil {
            logp.Warn("%v failed to scan %v: %v", s.logPrefix, path, err)
            continue
        }
        if err = s.walkDir(evalPath); err != nil {
            logp.Warn("%v failed to scan %v: %v", s.logPrefix, evalPath, err)
        }
    }
    duration := time.Since(startTime)
    // Load both counters atomically and use the loaded values consistently
    // in the summary (the original logged s.fileCount directly, bypassing
    // the atomic load it had just performed).
    byteCount := atomic.LoadUint64(&s.byteCount)
    fileCount := atomic.LoadUint64(&s.fileCount)
    logp.Info("%v File system scan completed after %v (%v files, %v bytes, %v/sec, %f files/sec).",
        s.logPrefix, duration, fileCount, byteCount,
        humanize.Bytes(uint64(float64(byteCount)/float64(duration)*float64(time.Second))),
        float64(fileCount)/float64(duration)*float64(time.Second))
}
// walkDir walks the tree rooted at dir, emitting one event per visited path.
// It returns early with a nil error when the done channel is closed.
func (s *scanner) walkDir(dir string) error {
    // errDone is a sentinel used to abort filepath.Walk when done is closed.
    errDone := errors.New("done")
    startTime := time.Now()
    err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
        defer func() { startTime = time.Now() }()
        event := s.newScanEvent(path, info, err)
        event.rtt = time.Since(startTime)
        select {
        case s.eventC <- event:
        case <-s.done:
            return errDone
        }
        // When Walk reports an error, info may be nil. The error has already
        // been attached to the emitted event, so skip the info-dependent
        // logic below rather than dereferencing a nil FileInfo (which would
        // panic in the original at info.IsDir()).
        if err != nil || info == nil {
            return nil
        }
        // Throttle reading and hashing rate.
        if event.Info != nil && len(event.Hashes) > 0 {
            s.throttle(event.Info.Size)
        }
        // Always traverse into the start dir.
        if !info.IsDir() || dir == path {
            return nil
        }
        // Only step into directories if recursion is enabled.
        // Skip symlinks to dirs.
        m := info.Mode()
        if !s.config.Recursive || m&os.ModeSymlink > 0 {
            return filepath.SkipDir
        }
        return nil
    })
    if err == errDone {
        err = nil
    }
    return err
}
// throttle blocks until the token bucket permits reading fileSize more bytes,
// or until the done channel is closed. It is a no-op when rate limiting is
// disabled (nil tokenBucket).
func (s *scanner) throttle(fileSize uint64) {
    if s.tokenBucket == nil {
        return
    }
    wait := s.tokenBucket.Take(int64(fileSize))
    if wait > 0 {
        timer := time.NewTimer(wait)
        // Release the timer's resources if we exit early via done.
        defer timer.Stop()
        select {
        case <-timer.C:
        case <-s.done:
        }
    }
}
func (s *scanner) newScanEvent(path string, info os.FileInfo, err error) Event {
event := NewEventFromFileInfo(path, info, err, None, SourceScan,
s.config.MaxFileSizeBytes, s.config.HashTypes)
// Update metrics.
atomic.AddUint64(&s.fileCount, 1)
if event.Info != nil {
atomic.AddUint64(&s.byteCount, event.Info.Size)
}
return event
}

View File

@ -0,0 +1,137 @@
package file
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
// TestScanner exercises the filesystem scanner in non-recursive, recursive,
// and rate-limited modes against a fixture directory created by setupTestDir.
func TestScanner(t *testing.T) {
    dir := setupTestDir(t)
    defer os.RemoveAll(dir)

    config := defaultConfig
    config.Paths = []string{
        dir,
        filepath.Join(dir, "a"),
        "/does/not/exist", // Nonexistent paths are logged and skipped.
    }

    t.Run("non-recursive", func(t *testing.T) {
        reader, err := NewFileSystemScanner(config)
        if err != nil {
            t.Fatal(err)
        }
        done := make(chan struct{})
        defer close(done)
        eventC, err := reader.Start(done)
        if err != nil {
            t.Fatal(err)
        }
        var events []Event
        for event := range eventC {
            events = append(events, event)
        }
        assert.Len(t, events, 7)
    })

    t.Run("recursive", func(t *testing.T) {
        c := config
        c.Recursive = true
        reader, err := NewFileSystemScanner(c)
        if err != nil {
            t.Fatal(err)
        }
        done := make(chan struct{})
        defer close(done)
        eventC, err := reader.Start(done)
        if err != nil {
            t.Fatal(err)
        }
        var foundRecursivePath bool
        var events []Event
        for event := range eventC {
            events = append(events, event)
            if filepath.Base(event.Path) == "c" {
                foundRecursivePath = true
            }
        }
        assert.Len(t, events, 8)
        assert.True(t, foundRecursivePath, "expected subdir/c to be included")
    })

    // This smoke tests the rate limit code path, but does not validate the rate.
    t.Run("with rate limit", func(t *testing.T) {
        c := config
        c.ScanRateBytesPerSec = 1024 * 5
        reader, err := NewFileSystemScanner(c)
        if err != nil {
            t.Fatal(err)
        }
        done := make(chan struct{})
        defer close(done)
        eventC, err := reader.Start(done)
        if err != nil {
            // The original contained a second, identical dead err check
            // here; it has been removed.
            t.Fatal(err)
        }
        var events []Event
        for event := range eventC {
            events = append(events, event)
        }
        assert.Len(t, events, 7)
    })
}
// setupTestDir creates a temporary fixture tree for the scanner tests:
//
//   a, b            regular files
//   link_to_b       symlink to b
//   subdir/c        file inside a subdirectory
//   link_to_subdir  symlink to subdir
//
// The caller is responsible for removing the returned directory.
func setupTestDir(t *testing.T) string {
    dir, err := ioutil.TempDir("", "audit-file-scan")
    if err != nil {
        t.Fatal(err)
    }
    if err = ioutil.WriteFile(filepath.Join(dir, "a"), []byte("file a"), 0600); err != nil {
        t.Fatal(err)
    }
    if err = ioutil.WriteFile(filepath.Join(dir, "b"), []byte("file b"), 0600); err != nil {
        t.Fatal(err)
    }
    if err = os.Symlink(filepath.Join(dir, "b"), filepath.Join(dir, "link_to_b")); err != nil {
        t.Fatal(err)
    }
    if err = os.Mkdir(filepath.Join(dir, "subdir"), 0700); err != nil {
        t.Fatal(err)
    }
    if err = ioutil.WriteFile(filepath.Join(dir, "subdir", "c"), []byte("file c"), 0600); err != nil {
        t.Fatal(err)
    }
    if err = os.Symlink(filepath.Join(dir, "subdir"), filepath.Join(dir, "link_to_subdir")); err != nil {
        t.Fatal(err)
    }
    return dir
}

View File

@ -0,0 +1,67 @@
// Flatbuffer schema for locally persisted audit file events.
// Regenerate the Go bindings with: flatc --go schema.fbs
namespace schema;

// Action describes the kind of change observed for a file.
enum Action : ubyte {
    None,
    AttributesModified,
    Created,
    Deleted,
    Updated,
    Moved,
    ConfigChanged,
}

// Source identifies which producer observed the event.
enum Source : ubyte {
    Scan,
    FSNotify,
}

// Type is the kind of filesystem object.
enum Type : ubyte {
    Unknown,
    File,
    Dir,
    Symlink,
}

// Metadata holds the file attributes captured with the event.
table Metadata {
    inode:ulong;
    uid:uint;
    gid:uint;
    sid:string;
    mode:uint;
    size:ulong;
    mtime_ns:long;
    ctime_ns:long;
    type:Type = 1; // Defaults to the second enum value (File).
}

// Hash holds the file's content digests; each field is one algorithm.
table Hash {
    md5:[byte];
    // SHA-1
    sha1:[byte];
    // SHA-2
    sha224:[byte];
    sha256:[byte];
    sha384:[byte];
    sha512:[byte];
    sha512_224:[byte];
    sha512_256:[byte];
    // SHA-3
    sha3_224:[byte];
    sha3_256:[byte];
    sha3_384:[byte];
    sha3_512:[byte];
}

// Event is the root persisted record; the file path is the datastore key
// and is therefore not stored here.
table Event {
    timestamp_ns:long;
    target_path:string;
    action:Action;
    source:Source;
    info:Metadata;
    hashes:Hash;
}

root_type Event;

View File

@ -0,0 +1,23 @@
// automatically generated by the FlatBuffers compiler, do not modify
package schema
const (
ActionNone = 0
ActionAttributesModified = 1
ActionCreated = 2
ActionDeleted = 3
ActionUpdated = 4
ActionMoved = 5
ActionConfigChanged = 6
)
var EnumNamesAction = map[int]string{
ActionNone: "None",
ActionAttributesModified: "AttributesModified",
ActionCreated: "Created",
ActionDeleted: "Deleted",
ActionUpdated: "Updated",
ActionMoved: "Moved",
ActionConfigChanged: "ConfigChanged",
}

View File

@ -0,0 +1,122 @@
// automatically generated by the FlatBuffers compiler, do not modify
package schema
import (
flatbuffers "github.com/google/flatbuffers/go"
)
type Event struct {
_tab flatbuffers.Table
}
func GetRootAsEvent(buf []byte, offset flatbuffers.UOffsetT) *Event {
n := flatbuffers.GetUOffsetT(buf[offset:])
x := &Event{}
x.Init(buf, n+offset)
return x
}
func (rcv *Event) Init(buf []byte, i flatbuffers.UOffsetT) {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
}
func (rcv *Event) Table() flatbuffers.Table {
return rcv._tab
}
func (rcv *Event) TimestampNs() int64 {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
return rcv._tab.GetInt64(o + rcv._tab.Pos)
}
return 0
}
func (rcv *Event) MutateTimestampNs(n int64) bool {
return rcv._tab.MutateInt64Slot(4, n)
}
func (rcv *Event) TargetPath() []byte {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
return rcv._tab.ByteVector(o + rcv._tab.Pos)
}
return nil
}
func (rcv *Event) Action() byte {
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
if o != 0 {
return rcv._tab.GetByte(o + rcv._tab.Pos)
}
return 0
}
func (rcv *Event) MutateAction(n byte) bool {
return rcv._tab.MutateByteSlot(8, n)
}
func (rcv *Event) Source() byte {
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
return rcv._tab.GetByte(o + rcv._tab.Pos)
}
return 0
}
func (rcv *Event) MutateSource(n byte) bool {
return rcv._tab.MutateByteSlot(10, n)
}
func (rcv *Event) Info(obj *Metadata) *Metadata {
o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(Metadata)
}
obj.Init(rcv._tab.Bytes, x)
return obj
}
return nil
}
func (rcv *Event) Hashes(obj *Hash) *Hash {
o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(Hash)
}
obj.Init(rcv._tab.Bytes, x)
return obj
}
return nil
}
func EventStart(builder *flatbuffers.Builder) {
builder.StartObject(6)
}
func EventAddTimestampNs(builder *flatbuffers.Builder, timestampNs int64) {
builder.PrependInt64Slot(0, timestampNs, 0)
}
func EventAddTargetPath(builder *flatbuffers.Builder, targetPath flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(targetPath), 0)
}
func EventAddAction(builder *flatbuffers.Builder, action byte) {
builder.PrependByteSlot(2, action, 0)
}
func EventAddSource(builder *flatbuffers.Builder, source byte) {
builder.PrependByteSlot(3, source, 0)
}
func EventAddInfo(builder *flatbuffers.Builder, info flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(info), 0)
}
func EventAddHashes(builder *flatbuffers.Builder, hashes flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(5, flatbuffers.UOffsetT(hashes), 0)
}
func EventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}

View File

@ -0,0 +1,310 @@
// automatically generated by the FlatBuffers compiler, do not modify
package schema
import (
flatbuffers "github.com/google/flatbuffers/go"
)
type Hash struct {
_tab flatbuffers.Table
}
func GetRootAsHash(buf []byte, offset flatbuffers.UOffsetT) *Hash {
n := flatbuffers.GetUOffsetT(buf[offset:])
x := &Hash{}
x.Init(buf, n+offset)
return x
}
func (rcv *Hash) Init(buf []byte, i flatbuffers.UOffsetT) {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
}
func (rcv *Hash) Table() flatbuffers.Table {
return rcv._tab
}
func (rcv *Hash) Md5(j int) int8 {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.GetInt8(a + flatbuffers.UOffsetT(j*1))
}
return 0
}
func (rcv *Hash) Md5Length() int {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
return rcv._tab.VectorLen(o)
}
return 0
}
func (rcv *Hash) Sha1(j int) int8 {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.GetInt8(a + flatbuffers.UOffsetT(j*1))
}
return 0
}
func (rcv *Hash) Sha1Length() int {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
return rcv._tab.VectorLen(o)
}
return 0
}
func (rcv *Hash) Sha224(j int) int8 {
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.GetInt8(a + flatbuffers.UOffsetT(j*1))
}
return 0
}
func (rcv *Hash) Sha224Length() int {
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
if o != 0 {
return rcv._tab.VectorLen(o)
}
return 0
}
func (rcv *Hash) Sha256(j int) int8 {
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.GetInt8(a + flatbuffers.UOffsetT(j*1))
}
return 0
}
func (rcv *Hash) Sha256Length() int {
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
return rcv._tab.VectorLen(o)
}
return 0
}
func (rcv *Hash) Sha384(j int) int8 {
o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.GetInt8(a + flatbuffers.UOffsetT(j*1))
}
return 0
}
func (rcv *Hash) Sha384Length() int {
o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
return rcv._tab.VectorLen(o)
}
return 0
}
// Sha512 returns element j of the sha512 int8 vector (vtable slot
// 14), or 0 if the field is absent.
func (rcv *Hash) Sha512(j int) int8 {
	off := flatbuffers.UOffsetT(rcv._tab.Offset(14))
	if off == 0 {
		return 0
	}
	vec := rcv._tab.Vector(off)
	return rcv._tab.GetInt8(vec + flatbuffers.UOffsetT(j))
}

// Sha512Length reports the number of elements in the sha512 vector
// (0 if the field is absent).
func (rcv *Hash) Sha512Length() int {
	off := flatbuffers.UOffsetT(rcv._tab.Offset(14))
	if off == 0 {
		return 0
	}
	return rcv._tab.VectorLen(off)
}
// Sha512224 returns element j of the sha512/224 int8 vector (vtable
// slot 16), or 0 if the field is absent.
func (rcv *Hash) Sha512224(j int) int8 {
	off := flatbuffers.UOffsetT(rcv._tab.Offset(16))
	if off == 0 {
		return 0
	}
	vec := rcv._tab.Vector(off)
	return rcv._tab.GetInt8(vec + flatbuffers.UOffsetT(j))
}

// Sha512224Length reports the number of elements in the sha512/224
// vector (0 if the field is absent).
func (rcv *Hash) Sha512224Length() int {
	off := flatbuffers.UOffsetT(rcv._tab.Offset(16))
	if off == 0 {
		return 0
	}
	return rcv._tab.VectorLen(off)
}
// Sha512256 returns element j of the sha512/256 int8 vector (vtable
// slot 18), or 0 if the field is absent.
func (rcv *Hash) Sha512256(j int) int8 {
	off := flatbuffers.UOffsetT(rcv._tab.Offset(18))
	if off == 0 {
		return 0
	}
	vec := rcv._tab.Vector(off)
	return rcv._tab.GetInt8(vec + flatbuffers.UOffsetT(j))
}

// Sha512256Length reports the number of elements in the sha512/256
// vector (0 if the field is absent).
func (rcv *Hash) Sha512256Length() int {
	off := flatbuffers.UOffsetT(rcv._tab.Offset(18))
	if off == 0 {
		return 0
	}
	return rcv._tab.VectorLen(off)
}
// Sha3224 returns element j of the sha3-224 int8 vector (vtable slot
// 20), or 0 if the field is absent.
func (rcv *Hash) Sha3224(j int) int8 {
	off := flatbuffers.UOffsetT(rcv._tab.Offset(20))
	if off == 0 {
		return 0
	}
	vec := rcv._tab.Vector(off)
	return rcv._tab.GetInt8(vec + flatbuffers.UOffsetT(j))
}

// Sha3224Length reports the number of elements in the sha3-224 vector
// (0 if the field is absent).
func (rcv *Hash) Sha3224Length() int {
	off := flatbuffers.UOffsetT(rcv._tab.Offset(20))
	if off == 0 {
		return 0
	}
	return rcv._tab.VectorLen(off)
}
// Sha3256 returns element j of the sha3-256 int8 vector (vtable slot
// 22), or 0 if the field is absent.
func (rcv *Hash) Sha3256(j int) int8 {
	off := flatbuffers.UOffsetT(rcv._tab.Offset(22))
	if off == 0 {
		return 0
	}
	vec := rcv._tab.Vector(off)
	return rcv._tab.GetInt8(vec + flatbuffers.UOffsetT(j))
}

// Sha3256Length reports the number of elements in the sha3-256 vector
// (0 if the field is absent).
func (rcv *Hash) Sha3256Length() int {
	off := flatbuffers.UOffsetT(rcv._tab.Offset(22))
	if off == 0 {
		return 0
	}
	return rcv._tab.VectorLen(off)
}
// Sha3384 returns element j of the sha3-384 int8 vector (vtable slot
// 24), or 0 if the field is absent.
func (rcv *Hash) Sha3384(j int) int8 {
	off := flatbuffers.UOffsetT(rcv._tab.Offset(24))
	if off == 0 {
		return 0
	}
	vec := rcv._tab.Vector(off)
	return rcv._tab.GetInt8(vec + flatbuffers.UOffsetT(j))
}

// Sha3384Length reports the number of elements in the sha3-384 vector
// (0 if the field is absent).
func (rcv *Hash) Sha3384Length() int {
	off := flatbuffers.UOffsetT(rcv._tab.Offset(24))
	if off == 0 {
		return 0
	}
	return rcv._tab.VectorLen(off)
}
// Sha3512 returns element j of the sha3-512 int8 vector (vtable slot
// 26), or 0 if the field is absent.
func (rcv *Hash) Sha3512(j int) int8 {
	off := flatbuffers.UOffsetT(rcv._tab.Offset(26))
	if off == 0 {
		return 0
	}
	vec := rcv._tab.Vector(off)
	return rcv._tab.GetInt8(vec + flatbuffers.UOffsetT(j))
}

// Sha3512Length reports the number of elements in the sha3-512 vector
// (0 if the field is absent).
func (rcv *Hash) Sha3512Length() int {
	off := flatbuffers.UOffsetT(rcv._tab.Offset(26))
	if off == 0 {
		return 0
	}
	return rcv._tab.VectorLen(off)
}
// HashStart begins serializing a new Hash table; the table declares
// 12 fields (one vector per hash algorithm).
func HashStart(builder *flatbuffers.Builder) {
	builder.StartObject(12)
}
// Builder helpers: each hash field has a pair of functions. The
// HashStartXVector function begins an int8 vector (element size 1,
// alignment 1) holding numElems entries, and HashAddX records the
// finished vector's offset at field X's slot (slots 0-11 map to
// md5 through sha3-512, in schema order), defaulting to 0 (absent).
func HashAddMd5(builder *flatbuffers.Builder, md5 flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(md5), 0)
}
func HashStartMd5Vector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(1, numElems, 1)
}
func HashAddSha1(builder *flatbuffers.Builder, sha1 flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(sha1), 0)
}
func HashStartSha1Vector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(1, numElems, 1)
}
func HashAddSha224(builder *flatbuffers.Builder, sha224 flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(sha224), 0)
}
func HashStartSha224Vector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(1, numElems, 1)
}
func HashAddSha256(builder *flatbuffers.Builder, sha256 flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(sha256), 0)
}
func HashStartSha256Vector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(1, numElems, 1)
}
func HashAddSha384(builder *flatbuffers.Builder, sha384 flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(sha384), 0)
}
func HashStartSha384Vector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(1, numElems, 1)
}
func HashAddSha512(builder *flatbuffers.Builder, sha512 flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(5, flatbuffers.UOffsetT(sha512), 0)
}
func HashStartSha512Vector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(1, numElems, 1)
}
func HashAddSha512224(builder *flatbuffers.Builder, sha512224 flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(6, flatbuffers.UOffsetT(sha512224), 0)
}
func HashStartSha512224Vector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(1, numElems, 1)
}
func HashAddSha512256(builder *flatbuffers.Builder, sha512256 flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(7, flatbuffers.UOffsetT(sha512256), 0)
}
func HashStartSha512256Vector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(1, numElems, 1)
}
func HashAddSha3224(builder *flatbuffers.Builder, sha3224 flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(8, flatbuffers.UOffsetT(sha3224), 0)
}
func HashStartSha3224Vector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(1, numElems, 1)
}
func HashAddSha3256(builder *flatbuffers.Builder, sha3256 flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(9, flatbuffers.UOffsetT(sha3256), 0)
}
func HashStartSha3256Vector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(1, numElems, 1)
}
func HashAddSha3384(builder *flatbuffers.Builder, sha3384 flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(10, flatbuffers.UOffsetT(sha3384), 0)
}
func HashStartSha3384Vector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(1, numElems, 1)
}
func HashAddSha3512(builder *flatbuffers.Builder, sha3512 flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(11, flatbuffers.UOffsetT(sha3512), 0)
}
func HashStartSha3512Vector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(1, numElems, 1)
}
// HashEnd finishes the Hash table under construction and returns its
// offset within the builder's buffer.
func HashEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
	return builder.EndObject()
}

// Diff-viewer residue (not Go source): "Some files were not shown because too many files have changed in this diff · Show More"