Update to libbeat 6.5

This commit is contained in:
Blerim Sheqa 2018-12-19 13:17:47 +01:00
parent 6a1ac8ba5a
commit 592affc521
4199 changed files with 513150 additions and 45226 deletions

View File

@ -8,4 +8,5 @@ See https://www.elastic.co/community/security for more information.
For confirmed bugs, please report:
- Version:
- Operating System:
- Discuss Forum URL:
- Steps to Reproduce:

View File

@ -8,6 +8,7 @@
/*/fields.yml
/*/*.template*.json
**/html_docs
/*/_meta/kibana.generated
# Files
.DS_Store

View File

@ -1 +1 @@
1.9.4
1.10.6

View File

@ -14,6 +14,7 @@ env:
- GOX_FLAGS="-arch amd64"
- DOCKER_COMPOSE_VERSION=1.11.1
- GO_VERSION="$(cat .go-version)"
# Newer versions of minikube fail on travis, see: https://github.com/kubernetes/minikube/issues/2704
- TRAVIS_MINIKUBE_VERSION=v0.25.2
jobs:
@ -98,6 +99,23 @@ jobs:
go: $GO_VERSION
stage: test
# Journalbeat
- os: linux
env: TARGETS="-C journalbeat testsuite"
go: $GO_VERSION
stage: test
# Functionbeat
- os: linux
env: TARGETS="-C x-pack/functionbeat testsuite"
go: $GO_VERSION
stage: test
- os: osx
env: TARGETS="TEST_ENVIRONMENT=0 -C x-pack/functionbeat testsuite"
go: $GO_VERSION
stage: test
# Generators
- os: linux
env: TARGETS="-C generator/metricbeat test"
@ -108,19 +126,13 @@ jobs:
go: $GO_VERSION
stage: test
# Docs
- os: linux
env: TARGETS="docs"
go: $GO_VERSION
stage: test
# Kubernetes
- os: linux
install: deploy/kubernetes/.travis/setup.sh
env:
- TARGETS="-C deploy/kubernetes test"
- TRAVIS_K8S_VERSION=v1.6.4
stage: test
- os: linux
install: deploy/kubernetes/.travis/setup.sh
env:
- TARGETS="-C deploy/kubernetes test"
- TRAVIS_K8S_VERSION=v1.7.5
stage: test
- os: linux
install: deploy/kubernetes/.travis/setup.sh
env:
@ -139,12 +151,23 @@ jobs:
- TARGETS="-C deploy/kubernetes test"
- TRAVIS_K8S_VERSION=v1.10.0
stage: test
# TODO include 1.11 once minikube supports it
#- os: linux
# install: deploy/kubernetes/.travis/setup.sh
# env:
# - TARGETS="-C deploy/kubernetes test"
# - TRAVIS_K8S_VERSION=v1.11.0
# stage: test
addons:
apt:
update: true
packages:
- python-virtualenv
- libpcap-dev
- xsltproc
- libxml2-utils
- libsystemd-journal-dev
before_install:
- python --version
@ -155,6 +178,8 @@ before_install:
- curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
- chmod +x docker-compose
- sudo mv docker-compose /usr/local/bin
- if [ $TRAVIS_OS_NAME = osx ]; then pip install virtualenv; fi
# Skips installations step
install: true

View File

@ -21,9 +21,43 @@ The list below covers the major changes between 6.3.0 and master only.
- The beat.Pipeline is now passed to cfgfile.RunnerFactory. Beats using libbeat for module reloading or autodiscovery need to be adapted. {pull}7018[7018]
- Moving of TLS helper functions and structs from `output/tls` to `tlscommon`. {pull}7054[7054]
- Port fields.yml collector to Golang {pull}6911[6911]
- Dashboards under _meta/kibana are expected to be decoded. See https://github.com/elastic/beats/pull/7224 for a conversion script. {pull}7265[7265]
- Constructor `(github.com/elastic/beats/libbeat/output/codec/json).New` expects a new `escapeHTML` parameter. {pull}7445[7445]
- Packaging has been refactored and updates are required. See the PR for migration details. {pull}7388[7388]
- `make fields` has been modified to use Mage (https://magefile.org/) in an effort to make
the building a Beat more cross-platform friendly (e.g. Windows). This requires that your Beat
has a magefile.go with a fields target. The `FIELDS_FILE_PATH` make variable is no longer
used because the value is specified in magefile.go. {pull}7670[7670]
- Renamed `-beat-name` CLI option used in `kibana_index_pattern.go` to `-beat` for consistency with other scripts in `dev-tools/cmd`. {pull}8615[8615]
==== Bugfixes
- Fix permissions of generated Filebeat filesets. {pull}7140[7140]
- Collect fields from _meta/fields.yml too. {pull}8397[8397]
- Fix issue on asset generation that could lead to different results in Windows. {pull}8464[8464]
==== Added
- Libbeat provides a global registry for beats developer that allow to register and retrieve plugin. {pull}7392[7392]
- Added more options to control required and optional fields in schema.Apply(), error returned is a plain nil if no error happened {pull}7335[7335]
- Packaging on MacOS now produces a .dmg file containing an installer (.pkg) and uninstaller for the Beat. {pull}7481[7481]
- Added mage targets `goTestUnit` and `goTestIntegration` for executing
'go test'. This captures the log to a file, summarizes the result, produces a
coverage profile (.cov), and produces an HTML coverage report. See
`mage -h goTestUnit`. {pull}7766[7766]
- Beats packaging now build non-oss binaries from code located in the x-pack folder. {issue}7783[7783]
- New function `AddTagsWithKey` is added, so `common.MapStr` can be enriched with tags with an arbitrary key. {pull}7991[7991]
- Libbeat provides a new function `cmd.GenRootCmdWithSettings` that should be preferred over deprecated functions
`cmd.GenRootCmd`, `cmd.GenRootCmdWithRunFlags`, and `cmd.GenRootCmdWithIndexPrefixWithRunFlags`. {pull}7850[7850]
- Set current year in generator templates. {pull}8396[8396]
- You can now override default settings of libbeat by using instance.Settings. {pull}8449[8449]
- Add `-space-id` option to `export_dashboards.go` script to support Kibana Spaces {pull}7942[7942]
- Add `-name` option to `asset.go` script to explicitly name the asset rather than using its filename. {pull}8693[8693]
- Add `-out` option to `kibana_index_pattern.go` to control the output dir to make it possible to write the generated output to `build/kibana` instead of `_meta/kibana.generated` (but the output dir remains unchanged at this point). {pull}8615[8615]
- Add `module_fields.go` for generated `fields.go` files for modules. {pull}8615[8615]
- Add `mage.GenerateModuleReferenceConfig` for generating reference config files that include configuration sections from the module directory. {pull}8615[8615]
- Add `mage.GenerateFieldsGo` for generating fields.go files. {pull}8615[8615]
- Add `mage.KibanaDashboards` for collecting Kibana dashboards and generating index patterns. {pull}8615[8615]
- Allow to disable config resolver using the `Settings.DisableConfigResolver` field when initializing libbeat. {pull}8769[8769]
- Add `mage.AddPlatforms` to allow to specify dependent platforms when building a beat. {pull}8889[8889]

View File

@ -7,7 +7,7 @@
// Template, add newest changes here
=== Beats version HEAD
https://github.com/elastic/beats/compare/v6.3.2...6.3[Check the HEAD diff]
https://github.com/elastic/beats/compare/v6.5.3...6.5[Check the HEAD diff]
==== Breaking changes
@ -19,28 +19,41 @@ https://github.com/elastic/beats/compare/v6.3.2...6.3[Check the HEAD diff]
*Heartbeat*
*Journalbeat*
*Metricbeat*
*Packetbeat*
*Winlogbeat*
*Functionbeat*
==== Bugfixes
*Affecting all Beats*
- Update Golang to 1.10.6. {pull}9563[9563]
*Auditbeat*
*Filebeat*
- Fix saved objects in filebeat haproxy dashboard. {pull}9417[9417]
- Fixed a memory leak when harvesters are closed. {pull}7820[7820]
*Heartbeat*
*Journalbeat*
*Metricbeat*
*Packetbeat*
*Winlogbeat*
*Functionbeat*
==== Added
*Affecting all Beats*
@ -49,14 +62,22 @@ https://github.com/elastic/beats/compare/v6.3.2...6.3[Check the HEAD diff]
*Filebeat*
- Added support on Traefik for Common Log Format and Combined Log Format mixed which is the default Traefik format {issue}8015[8015] {issue}6111[6111] {pull}8768[8768].
*Heartbeat*
*Journalbeat*
*Metricbeat*
*Packetbeat*
*Winlogbeat*
*Heartbeat*
*Functionbeat*
==== Deprecated
*Affecting all Beats*
@ -65,45 +86,472 @@ https://github.com/elastic/beats/compare/v6.3.2...6.3[Check the HEAD diff]
*Heartbeat*
*Metricbeat*
*Journalbeat*
*Packetbeat*
*Winlogbeat*
*Functionbeat*
==== Known Issue
////////////////////////////////////////////////////////////
[[release-notes-6.3.2]]
=== Beats version 6.3.2
https://github.com/elastic/beats/compare/v6.3.1...v6.3.2[View commits]
[[release-notes-6.5.3]]
=== Beats version 6.5.3
https://github.com/elastic/beats/compare/v6.5.2...v6.5.3[View commits]
==== Bugfixes
*Affecting all Beats*
- Fix registry duplicates and log resending on upgrade. {issue}7634[7634]
- Fix default value for logging.files.keepfiles. It was being set to 0 and now
it's set to the documented value of 7. {issue}7494[7494]
- Retain compatibility with older Docker server versions. {issue}7542[7542]
- Log events at the debug level when dropped by encoding problems. {pull}9251[9251]
*Filebeat*
- Correctly parse `December` or `Dec` in the Syslog input. {pull}9349[9349]
- Don't generate incomplete configurations when logs collection is disabled by hints. {pull}9305[9305]
- Stop runners disabled by hints after previously being started. {pull}9305[9305]
- Fix installation of haproxy dashboard. {issue}9307[9307] {pull}9313[9313]
[[release-notes-6.5.2]]
=== Beats version 6.5.2
https://github.com/elastic/beats/compare/v6.5.1...v6.5.2[View commits]
==== Bugfixes
*Affecting all Beats*
- Propagate Sync error when running SafeFileRotate. {pull}9069[9069]
*Metricbeat*
- Fix missing hosts config option in Ceph module. {pull}7596[7596]
- Ensure metadata updates don't replace existing pod metrics. {pull}7573[7573]
- Fix panic on docker healthcheck collection on dockers without healthchecks. {pull}9171[9171]
- Fix issue preventing diskio metrics collection for idle disks. {issue}9124[9124] {pull}9125[9125]
[[release-notes-6.5.1]]
=== Beats version 6.5.1
https://github.com/elastic/beats/compare/v6.5.0...v6.5.1[View commits]
==== Bugfixes
*Affecting all Beats*
- Fix windows binaries not having an enroll command. {issue}9096[9096] {pull}8836[8836]
*Journalbeat*
- Fix journalbeat sometimes hanging if output is unavailable. {pull}9106[9106]
*Metricbeat*
- Fix race condition when enriching events with kubernetes metadata. {issue}9055[9055] {issue}9067[9067]
==== Added
*Journalbeat*
- Add minimal kibana dashboard. {pull}9106[9106]
[[release-notes-6.5.0]]
=== Beats version 6.5.0
https://github.com/elastic/beats/compare/v6.4.0...v6.5.0[View commits]
==== Bugfixes
*Affecting all Beats*
- Fixed `add_host_metadata` not initializing correctly on Windows. {issue}7715[7715]
- Fixed missing file unlock in spool file on Windows, so file can be reopened and locked. {pull}7859[7859]
- Fix spool file opening/creation failing due to file locking on Windows. {pull}7859[7859]
- Fix size of maximum mmaped read area in spool file on Windows. {pull}7859[7859]
- Fix potential data loss on OS X in spool file by using fcntl with F_FULLFSYNC. {pull}7859[7859]
- Improve fsync on linux, by assuming the kernel resets error flags of failed writes. {pull}7859[7859]
- Remove unix-like permission checks on Windows, so files can be opened. {issue}7849[7849]
- Replace index patterns in TSVB visualizations. {pull}7929[7929]
- Deregister pipeline loader callback when inputsRunner is stopped. {pull}7893[7893]
- Add backoff support to x-pack monitoring outputs. {issue}7966[7966]
- Removed execute permissions systemd unit file. {pull}7873[7873]
- Fix a race condition with the `add_host_metadata` and the event serialization. {pull}8223[8223] {pull}8653[8653]
- Enforce that data used by k8s or docker doesn't use any reference. {pull}8240[8240]
- Switch to different UUID lib due to non-random generated UUIDs. {pull}8485[8485]
- Fix race condition when publishing monitoring data. {pull}8646[8646]
- Fix bug in loading dashboards from zip file. {issue}8051[8051]
- Fix in-cluster kubernetes configuration on IPv6. {pull}8754[8754]
- The export config subcommand should not display real value for field reference. {pull}8769[8769]
- The setup command will not fail if no dashboard is available to import. {pull}8977[8977]
- Fix central management configurations reload when a configuration is removed in Kibana. {issue}9010[9010]
*Auditbeat*
- Fixed a crash in the file_integrity module under Linux. {issue}7753[7753]
- Fixed the RPM by designating the config file as configuration data in the RPM spec. {issue}8075[8075]
- Fixed a concurrent map write panic in the auditd module. {pull}8158[8158]
- Fixed a data race in the file_integrity module. {issue}8009[8009]
- Fixed a deadlock in the file_integrity module. {pull}8027[8027]
*Filebeat*
- Fix date format in Mongodb Ingest pipeline. {pull}7974[7974]
- Fixed a docker input error due to the offset update bug in partial log join.{pull}8177[8177]
- Update CRI format to support partial/full tags. {pull}8265[8265]
- Fix some errors happening when stopping syslog input. {pull}8347[8347]
- Fix RFC3339 timezone and nanoseconds parsing with the syslog input. {pull}8346[8346]
- Mark the TCP and UDP input as GA. {pull}8125[8125]
- Support multiline logs in logstash/log fileset of Filebeat. {pull}8562[8562]
*Heartbeat*
- Fixed bug where HTTP responses with larger bodies would incorrectly report connection errors. {pull}8660[8660]
*Metricbeat*
- Add support for bearer token files to HTTP helper. {pull}7527[7527]
- Fix golang.heap.gc.cpu_fraction type from long to float in Golang module. {pull}7789[7789]
- Fixed the RPM by designating the modules.d config files as configuration data in the RPM spec. {issue}8075[8075]
- Fixed the location of the modules.d dir in Deb and RPM packages. {issue}8104[8104]
- Add docker diskio stats on Windows. {issue}6815[6815] {pull}8126[8126]
- Fix incorrect type conversion of average response time in Haproxy dashboards {pull}8404[8404]
- Added io disk read and write times to system module {issue}8473[8473] {pull}8508[8508]
- Avoid mapping issues in kubernetes module. {pull}8487[8487]
- Recover metrics for old apache versions removed by mistake on #6450. {pull}7871[7871]
- Fix dropwizard module parsing of metric names. {issue}8365[8365] {pull}8385[8385]
- Fix issue that would prevent kafka module to find a proper broker when port is not set {pull}8613[8613]
- Fix range colors in multiple visualizations. {issue}8633[8633] {pull}8634[8634]
- Fix incorrect header parsing on http metricbeat module {issue}8564[8564] {pull}8585[8585]
- Fixed a panic when the kvm module cannot establish a connection to libvirtd. {issue}7792[7792].
- Add missing namespace field in http server metricset {pull}7890[7890]
*Packetbeat*
- Updated the TLS protocol parser with new cipher suites added to TLS 1.3. {issue}7455[7455]
- Fixed a seccomp related error where the `fcntl64` syscall was not permitted
on 32-bit Linux and the sniffer failed to start. {issue}7839[7839]
- Added missing `cmdline` and `client_cmdline` fields to index template. {pull}8258[8258]
==== Added
*Affecting all Beats*
- Added time-based log rotation. {pull}8349[8349]
- Add backoff on error support to redis output. {pull}7781[7781]
- Allow for cloud-id to specify a custom port. This makes cloud-id work in ECE contexts. {pull}7887[7887]
- Add support to grow or shrink an existing spool file between restarts. {pull}7859[7859]
- Make kubernetes autodiscover ignore events with empty container IDs {pull}7971[7971]
- Implement CheckConfig in RunnerFactory to make autodiscover check configs {pull}7961[7961]
- Add DNS processor with support for performing reverse lookups on IP addresses. {issue}7770[7770]
- Support for Kafka 2.0.0 in kafka output {pull}8399[8399]
- Add setting `setup.kibana.space.id` to support Kibana Spaces {pull}7942[7942]
- Better tracking of number of open file descriptors. {pull}7986[7986]
- Report number of open file handles on Windows. {pull}8329[8329]
- Added the `add_process_metadata` processor to enrich events with process information. {pull}6789[6789]
- Add Beats Central Management {pull}8559[8559]
- Report configured queue type. {pull}8091[8091]
- Enable `host` and `cloud` metadata processors by default. {pull}8596[8596]
*Filebeat*
- Add tag "truncated" to "log.flags" if incoming line is longer than configured limit. {pull}7991[7991]
- Add haproxy module. {pull}8014[8014]
- Add tag "multiline" to "log.flags" if event consists of multiple lines. {pull}7997[7997]
- Release `docker` input as GA. {pull}8328[8328]
- Keep unparsed user agent information in user_agent.original. {pull}7832[7832]
- Added default and TCP parsing formats to HAproxy module {issue}8311[8311] {pull}8637[8637]
- Add Suricata IDS/IDP/NSM module. {issue}8153[8153] {pull}8693[8693]
- Support for Kafka 2.0.0 {pull}8853[8853]
*Heartbeat*
- Heartbeat is marked as GA.
- Add automatic config file reloading. {pull}8023[8023]
- Added autodiscovery support {pull}8415[8415]
- Added support for extra TLS/x509 metadata. {pull}7944[7944]
- Added stats and state metrics for number of monitors and endpoints started. {pull}8621[8621]
*Journalbeat*
- Add journalbeat. {pull}8703[8703]
*Metricbeat*
- Add `replstatus` metricset to MongoDB module {pull}7604[7604]
- Add experimental socket summary metricset to system module {pull}6782[6782]
- Move common kafka fields (broker, topic and partition.id) to the module level to facilitate events correlation {pull}7767[7767]
- Add fields for memory fragmentation, memory allocator stats, copy on write, master-slave status, and active defragmentation to `info` metricset of Redis module. {pull}7695[7695]
- Increase ignore_above for system.process.cmdline to 2048. {pull}8100[8100]
- Add support to renamed fields planned for redis 5.0. {pull}8167[8167]
- Allow TCP helper to support delimiters and graphite module to accept multiple metrics in a single payload. {pull}8278[8278]
- Added 'died' PID state to process_system metricset on system module {pull}8275[8275]
- Add `metrics` metricset to MongoDB module. {pull}7611[7611]
- Added `ccr` metricset to Elasticsearch module. {pull}8335[8335]
- Support for Kafka 2.0.0 {pull}8399[8399]
- Added support for query params in configuration {issue}8286[8286] {pull}8292[8292]
- Add container image for docker metricsets. {issue}8214[8214] {pull}8438[8438]
- Precalculate composed id fields for kafka dashboards. {pull}8504[8504]
- Add support for `full` status page output for php-fpm module as a separate metricset called `process`. {pull}8394[8394]
- Add Kafka dashboard. {pull}8457[8457]
- Release Kafka module as GA. {pull}8854[8854]
*Packetbeat*
- Added DHCP protocol support. {pull}7647[7647]
*Functionbeat*
- Initial version of Functionbeat. {pull}8678[8678]
==== Deprecated
*Heartbeat*
- watch.poll_file is now deprecated and superseded by automatic config file reloading.
*Metricbeat*
- Redis `info` `replication.master_offset` has been deprecated in favor of `replication.master.offset`.{pull}7695[7695]
- Redis `info` clients fields `longest_output_list` and `biggest_input_buf` have been renamed to `max_output_buffer` and `max_input_buffer` based on the names they will have in Redis 5.0, both fields will coexist during a time with the same value {pull}8167[8167].
- Move common kafka fields (broker, topic and partition.id) to the module level {pull}7767[7767].
[[release-notes-6.4.3]]
=== Beats version 6.4.3
https://github.com/elastic/beats/compare/v6.4.2...v6.4.3[View commits]
==== Bugfixes
*Affecting all Beats*
- Fix a race condition with the `add_host_metadata` and the event serialization. {pull}8223[8223] {pull}8653[8653]
- Fix race condition when publishing monitoring data. {pull}8646[8646]
- Fix bug in loading dashboards from zip file. {issue}8051[8051]
- The export config subcommand should not display real value for field reference. {pull}8769[8769]
*Filebeat*
- Fix typo in Filebeat IIS Kibana visualization. {pull}8604[8604]
*Metricbeat*
- Recover metrics for old Apache versions removed by mistake on #6450. {pull}7871[7871]
- Avoid mapping issues in Kubernetes module. {pull}8487[8487]
- Fixed a panic when the KVM module cannot establish a connection to libvirtd. {issue}7792[7792]
[[release-notes-6.4.2]]
=== Beats version 6.4.2
https://github.com/elastic/beats/compare/v6.4.1...v6.4.2[View commits]
==== Bugfixes
*Filebeat*
- Fix some errors happening when stopping syslog input. {pull}8347[8347]
- Fix RFC3339 timezone and nanoseconds parsing with the syslog input. {pull}8346[8346]
*Metricbeat*
- Fix incorrect type conversion of average response time in Haproxy dashboards {pull}8404[8404]
- Fix dropwizard module parsing of metric names. {issue}8365[8365] {pull}8385[8385]
[[release-notes-6.4.1]]
=== Beats version 6.4.1
https://github.com/elastic/beats/compare/v6.4.0...v6.4.1[View commits]
==== Bugfixes
*Affecting all Beats*
- Add backoff support to x-pack monitoring outputs. {issue}7966[7966]
- Removed execute permissions systemd unit file. {pull}7873[7873]
- Fix a race condition with the `add_host_metadata` and the event serialization. {pull}8223[8223]
- Enforce that data used by k8s or docker doesn't use any reference. {pull}8240[8240]
- Implement CheckConfig in RunnerFactory to make autodiscover check configs {pull}7961[7961]
- Make kubernetes autodiscover ignore events with empty container IDs {pull}7971[7971]
*Auditbeat*
- Fixed a concurrent map write panic in the auditd module. {pull}8158[8158]
- Fixed the RPM by designating the config file as configuration data in the RPM spec. {issue}8075[8075]
*Filebeat*
- Fixed a docker input error due to the offset update bug in partial log join.{pull}8177[8177]
- Update CRI format to support partial/full tags. {pull}8265[8265]
*Metricbeat*
- Fixed the location of the modules.d dir in Deb and RPM packages. {issue}8104[8104]
- Fixed the RPM by designating the modules.d config files as configuration data in the RPM spec. {issue}8075[8075]
- Fix golang.heap.gc.cpu_fraction type from long to float in Golang module. {pull}7789[7789]
*Packetbeat*
- Added missing `cmdline` and `client_cmdline` fields to index template. {pull}8258[8258]
[[release-notes-6.4.0]]
=== Beats version 6.4.0
https://github.com/elastic/beats/compare/v6.3.1...v6.4.0[View commits]
==== Known issue
Due to a packaging mistake, the `modules.d` configuration directory is
installed in the wrong path in the Metricbeat DEB and RPM packages. This issue
results in an empty list when you run `metricbeat modules list` and failures
when you try to enable or disable modules. To work around this issue, run the
following command:
[source,sh]
-----------
sudo cp -r /usr/share/metricbeat/modules.d /etc/metricbeat/
-----------
This issue affects all new installations on DEB and RPM. Upgrades will run, but
use old configurations defined in the `modules.d` directory from the previous
installation.
The issue will be fixed in the 6.4.1 release.
==== Breaking changes
*Affecting all Beats*
- Set default kafka version to 1.0.0 in kafka output. Older versions are still supported by configuring the `version` setting. Minimally supported version is 0.11 (older versions might work, but are untested). {pull}7025[7025]
*Heartbeat*
- Rename http.response.status to http.response.status_code to align with ECS. {pull}7274[7274]
- Remove `type` field as not needed. {pull}7307[7307]
*Metricbeat*
- Fixed typo in values for `state_container` `status.phase`, from `terminate` to `terminated`. {pull}6916[6916]
- RabbitMQ management plugin path is now configured at the module level instead of having to do it in each of the metricsets. New `management_path_prefix` option should be used now {pull}7074[7074]
- RabbitMQ node metricset only collects metrics of the instance it connects to, `node.collect: cluster` can be used to collect all nodes as before. {issue}6556[6556] {pull}6971[6971]
- Change http/server metricset to put events by default under http.server and prefix config options with server.. {pull}7100[7100]
- Disable dedotting in docker module configuration. This will change the out-of-the-box behaviour, but not the one of already configured instances. {pull}7485[7485]
- Fix typo in etcd/self metricset fields from *.bandwithrate to *.bandwidthrate. {pull}7456[7456]
- Changed the definition of the `system.cpu.total.pct` and `system.cpu.total.norm.pct` fields to exclude the IOWait time. {pull}7691[7691]
==== Bugfixes
*Affecting all Beats*
- Error out on invalid Autodiscover template conditions settings. {pull}7200[7200]
- Allow to override the `ignore_above` option when defining new field with the type keyword. {pull}7238[7238]
- Fix a panic on the Dissect processor when we have data remaining after the last delimiter. {pull}7449[7449]
- When we fail to build a Kubernetes' indexer or matcher we produce a warning but we don't add them to the execution. {pull}7466[7466]
- Fix default value for logging.files.keepfiles. It was being set to 0 and now
it's set to the documented value of 7. {issue}7494[7494]
- Retain compatibility with older Docker server versions. {issue}7542[7542]
- Fix errors unpacking configs modified via CLI by ignoring `-E key=value` pairs with missing value. {pull}7599[7599]
*Auditbeat*
- Allow `auditbeat setup` to run without requiring elevated privileges for the audit client. {issue}7111[7111]
- Fix goroutine leak that occurred when the auditd module was stopped. {pull}7163[7163]
*Filebeat*
- Fix a data race between stopping and starting of the harvesters. {issue}6879[6879]
- Fix an issue when parsing ISO8601 dates with timezone definition {issue}7367[7367]
- Fix Grok pattern of MongoDB module. {pull}7568[7568]
- Fix registry duplicates and log resending on upgrade. {issue}7634[7634]
*Metricbeat*
- Fix Windows service metricset when using a 32-bit binary on a 64-bit OS. {pull}7294[7294]
- Do not report Metricbeat container host as hostname in Kubernetes deployment. {issue}7199[7199]
- Ensure metadata updates don't replace existing pod metrics. {pull}7573[7573]
- Fix kubernetes pct fields reporting. {pull}7677[7677]
- Add support for new `kube_node_status_condition` in Kubernetes `state_node`. {pull}7699[7699]
==== Added
*Affecting all Beats*
- Add dissect processor. {pull}6925[6925]
- Add IP-addresses and MAC-addresses to add_host_metadata. {pull}6878[6878]
- Added a seccomp (secure computing) filter on Linux that whitelists the
necessary system calls used by each Beat. {issue}5213[5213]
- Ship fields.yml as part of the binary {pull}4834[4834]
- Added options to dev-tools/cmd/dashboards/export_dashboard.go: -indexPattern to include index-pattern in output, -quiet to be quiet. {pull}7101[7101]
- Add Indexer indexing by pod uid. Enable pod uid metadata gathering in add_kubernetes_metadata. Extended Matcher log_path matching to support volume mounts {pull}7072[7072]
- Add default_fields to Elasticsearch template when connecting to Elasticsearch >= 7.0. {pull}7015[7015]
- Add support for loading a template.json file directly instead of using fields.yml. {pull}7039[7039]
- Add support for keyword multifields in field.yml. {pull}7131[7131]
- Add experimental Jolokia Discovery autodiscover provider. {pull}7141[7141]
- Add owner object info to Kubernetes metadata. {pull}7231[7231]
- Add Beat export dashboard command. {pull}7239[7239]
- Add support for docker autodiscover to monitor containers on host network {pull}6708[6708]
- Add ability to define input configuration as stringified JSON for autodiscover. {pull}7372[7372]
- Add processor definition support for hints builder {pull}7386[7386]
- Add support to disable html escaping in outputs. {pull}7445[7445]
- Refactor error handing in schema.Apply(). {pull}7335[7335]
- Add additional types to Kubernetes metadata {pull}7457[7457]
- Add module state reporting for Beats Monitoring. {pull}7075[7075]
- Release the `rename` processor as GA. {pull}7656[7656]
- Add support for Openstack Nova in `add_cloud_metadata` processor. {pull}7663[7663]
*Auditbeat*
- Added XXH64 hash option for file integrity checks. {pull}7311[7311]
- Added the `show auditd-rules` and `show auditd-status` commands to show kernel rules and status. {pull}7114[7114]
- Add Kubernetes specs for auditbeat file integrity monitoring {pull}7642[7642]
*Filebeat*
- Add Kibana module with log fileset. {pull}7052[7052]
- Support MySQL 5.7.19 by mysql/slowlog {pull}6969[6969]
- Correctly join partial log lines when using `docker` input. {pull}6967[6967]
- Add support for TLS with client authentication to the TCP input {pull}7056[7056]
- Converted part of pipeline from traefik/access metricset to dissect to improve efficiency. {pull}7209[7209]
- Add GC fileset to the Elasticsearch module. {pull}7305[7305]
- Add Audit log fileset to the Elasticsearch module. {pull}7365[7365]
- Add Slow log fileset to the Elasticsearch module. {pull}7473[7473]
- Add deprecation fileset to the Elasticsearch module. {pull}7474[7474]
- Add `convert_timezone` option to Kafka module to convert dates to UTC. {issue}7546[7546] {pull}7578[7578]
- Add patterns for kafka 1.1 logs. {pull}7608[7608]
- Move debug messages in tcp input source {pull}7712[7712]
*Metricbeat*
- Add experimental Elasticsearch index metricset. {pull}6881[6881]
- Add dashboards and visualizations for haproxy metrics. {pull}6934[6934]
- Add Jolokia agent in proxy mode. {pull}6475[6475]
- Add message rates to the RabbitMQ queue metricset {issue}6442[6442] {pull}6606[6606]
- Add exchanges metricset to the RabbitMQ module {issue}6442[6442] {pull}6607[6607]
- Add Elasticsearch index_summary metricset. {pull}6918[6918]
- Add shard metricset to Elasticsearch module. {pull}7006[7006]
- Add apiserver metricset to Kubernetes module. {pull}7059[7059]
- Add maxmemory to redis info metricset. {pull}7127[7127]
- Set guest as default user in RabbitMQ module. {pull}7107[7107]
- Add postgresql statement metricset. {issue}7048[7048] {pull}7060[7060]
- Update `state_container` metricset to support latest `kube-state-metrics` version. {pull}7216[7216]
- Add TLS support to MongoDB module. {pull}7401[7401]
- Added Traefik module with health metricset. {pull}7413[7413]
- Add Elasticsearch ml_job metricsets. {pull}7196[7196]
- Add support for bearer token files to HTTP helper. {pull}7527[7527]
- Add Elasticsearch index recovery metricset. {pull}7225[7225]
- Add `locks`, `global_locks`, `oplatencies` and `process` fields to `status` metricset of MongoDB module. {pull}7613[7613]
- Run Kafka integration tests on version 1.1.0 {pull}7616[7616]
- Release raid and socket metricset from system module as GA. {pull}7658[7658]
- Release elasticsearch module and all its metricsets as beta. {pull}7662[7662]
- Release munin and traefik module as beta. {pull}7660[7660]
- Add envoyproxy module. {pull}7569[7569]
- Release prometheus collector metricset as GA. {pull}7660[7660]
- Add Elasticsearch `cluster_stats` metricset. {pull}7638[7638]
- Added `basepath` setting for HTTP-based metricsets {pull}7700[7700]
*Packetbeat*
- The process monitor now reports the command-line for all processes, under Linux and Windows. {pull}7135[7135]
- Updated the TLS protocol parser with new cipher suites added to TLS 1.3. {issue}7455[7455]
- Flows are enriched with process information using the process monitor. {pull}7507[7507]
- Added UDP support to process monitor. {pull}7571[7571]
==== Deprecated
*Metricbeat*
- Kubernetes `state_container` `cpu.limit.nanocores` and `cpu.request.nanocores` have been
deprecated in favor of `cpu.*.cores`. {pull}6916[6916]
[[release-notes-6.3.1]]
=== Beats version 6.3.1
@ -194,6 +642,7 @@ https://github.com/elastic/beats/compare/v6.2.3...v6.3.0[View commits]
- Fix map overwrite panics by cloning shared structs before doing the update. {pull}6947[6947]
- Fix delays on autodiscovery events handling caused by blocking runner stops. {pull}7170[7170]
- Do not emit Kubernetes autodiscover events for Pods without IP address. {pull}7235[7235]
- Fix self metrics when containerized {pull}6641[6641]
*Auditbeat*
@ -208,7 +657,6 @@ https://github.com/elastic/beats/compare/v6.2.3...v6.3.0[View commits]
- Commit registry writes to stable storage to avoid corrupt registry files. {pull}6877[6877]
- Fix a parsing issue in the syslog input for RFC3339 timestamp and time with nanoseconds. {pull}7046[7046]
- Fix an issue with an overflowing wait group when using the TCP input. {issue}7202[7202]
- Fix an issue when parsing ISO8601 dates with timezone definition {issue}7367[7367]
*Heartbeat*
@ -362,7 +810,6 @@ https://github.com/elastic/beats/compare/v6.2.3...v6.3.0[View commits]
- Use bookmarks to persist the last published event. {pull}6150[6150]
[[release-notes-6.2.3]]
=== Beats version 6.2.3
https://github.com/elastic/beats/compare/v6.2.2...v6.2.3[View commits]
@ -436,7 +883,7 @@ https://github.com/elastic/beats/compare/v6.1.3...v6.2.0[View commits]
*Auditbeat*
- Fixed an issue where the proctitle value was being truncated. {pull}6080[6080]
- Fixed an issue where values were incorrectly interpretted as hex data. {pull}6080[6080]
- Fixed an issue where values were incorrectly interpreted as hex data. {pull}6080[6080]
- Fixed parsing of the `key` value when multiple keys are present. {pull}6080[6080]
- Fix possible resource leak if file_integrity module is used with config
reloading on Windows or Linux. {pull}6198[6198]
@ -475,7 +922,7 @@ https://github.com/elastic/beats/compare/v6.1.3...v6.2.0[View commits]
- Use structured logging for the metrics that are periodically logged via the
`logging.metrics` feature. {pull}5915[5915]
- Improve Elasticsearch output metrics to count number of dropped and duplicate (if event ID is given) events. {pull}5811[5811]
- Add the abilility for the add_docker_metadata process to enrich based on process ID. {pull}6100[6100]
- Add the ability for the add_docker_metadata process to enrich based on process ID. {pull}6100[6100]
- The `add_docker_metadata` and `add_kubernetes_metadata` processors are now GA, instead of Beta. {pull}6105[6105]
- Update go-ucfg library to support top level key reference and cyclic key reference for the
keystore {pull}6098[6098]
@ -501,7 +948,7 @@ https://github.com/elastic/beats/compare/v6.1.3...v6.2.0[View commits]
- Update the MySQL dashboard to use the Time Series Visual Builder. {pull}5996[5996]
- Add experimental uwsgi module. {pull}6006[6006]
- Docker and Kubernetes modules are now GA, instead of Beta. {pull}6105[6105]
- Support haproxy stats gathering using http (additionaly to tcp socket). {pull}5819[5819]
- Support haproxy stats gathering using http (additionally to tcp socket). {pull}5819[5819]
- Support to optionally 'de dot' keys in http/json metricset to prevent collisions. {pull}5957[5957]
*Packetbeat*
@ -604,7 +1051,7 @@ https://github.com/elastic/beats/compare/v6.0.1...v6.1.0[View commits]
*Affecting all Beats*
- Support dashboard loading without Elasticseach {pull}5653[5653]
- Support dashboard loading without Elasticsearch {pull}5653[5653]
- Changed the hashbang used in the beat helper script from `/bin/bash` to `/usr/bin/env bash`. {pull}5051[5051]
- Changed beat helper script to use `exec` when running the beat. {pull}5051[5051]
- Fix reloader error message to only print on actual error {pull}5066[5066]
@ -845,7 +1292,7 @@ https://github.com/elastic/beats/compare/v6.0.0-beta1...v6.0.0-beta2[View commit
- Added missing mongodb configuration file to the `modules.d` folder. {pull}4870[4870]
- Fix wrong MySQL CRUD queries timelion visualization {pull}4857[4857]
- Add new metrics to CPU metricsset {pull}4969[4969]
- Add new metrics to CPU metricset {pull}4969[4969]
*Packetbeat*
@ -1530,7 +1977,7 @@ https://github.com/elastic/beats/compare/v5.2.2...v5.3.0[View commits]
- Add Filebeat modules for system, apache2, mysql, and nginx. {issue}3159[3159]
- Add the `pipeline` config option at the prospector level, for configuring the Ingest Node pipeline ID. {pull}3433[3433]
- Update regular expressions used for matching file names or lines (multiline, include/exclude functionality) to new matchers improving performance of simple string matches. {pull}3469[3469]
- The `symlinks` and `harverster_limit` settings are now GA, instead of experimental. {pull}3525[3525]
- The `symlinks` and `harvester_limit` settings are now GA, instead of experimental. {pull}3525[3525]
- close_timeout is also applied when the output is blocking. {pull}3511[3511]
- Improve handling of different path variants on Windows. {pull}3781[3781]
- Add multiline.flush_pattern option, for specifying the 'end' of a multiline pattern {pull}4019[4019]
@ -1805,7 +2252,7 @@ The list below covers the changes between 5.0.0-rc1 and 5.0.0 GA only.
- Fix high CPU usage on macOS when encountering processes with long command lines. {issue}2747[2747]
- Fix high value of `system.memory.actual.free` and `system.memory.actual.used`. {issue}2653[2653]
- Change several `OpenProcess` calls on Windows to request the lowest possible access provilege. {issue}1897[1897]
- Change several `OpenProcess` calls on Windows to request the lowest possible access privilege. {issue}1897[1897]
- Fix system.memory.actual.free high value on Windows. {issue}2653[2653]
*Filebeat*
@ -2063,7 +2510,7 @@ https://github.com/elastic/beats/compare/v5.0.0-alpha3...v5.0.0-alpha4[View comm
*Affecting all Beats*
- The topology_expire option of the Elasticserach output was removed. {pull}1907[1907]
- The topology_expire option of the Elasticsearch output was removed. {pull}1907[1907]
*Filebeat*
@ -2560,7 +3007,7 @@ https://github.com/elastic/beats/compare/v1.0.1...v1.1.0[View commits]
- Add multiline support for combining multiple related lines into one event. {issue}461[461]
- Add `exclude_lines` and `include_lines` options for regexp based line filtering. {pull}430[430]
- Add `exclude_files` configuration option. {pull}563[563]
- Add experimental option to enable filebeat publisher pipeline to operate asynchonrously {pull}782[782]
- Add experimental option to enable filebeat publisher pipeline to operate asynchronously {pull}782[782]
*Winlogbeat*
@ -2601,7 +3048,7 @@ https://github.com/elastic/beats/compare/1.0.0-rc2...1.0.0[Check 1.0.0 diff]
*Affecting all Beats*
- Fix random panic on shutdown by calling shutdown handler only once. elastic/filebeat#204
- Fix credentials are not send when pinging an elasticsearch host. elastic/fileabeat#287
- Fix credentials are not send when pinging an elasticsearch host. elastic/filebeat#287
*Filebeat*

View File

@ -1,9 +1,8 @@
BUILD_DIR=$(CURDIR)/build
COVERAGE_DIR=$(BUILD_DIR)/coverage
BEATS=packetbeat filebeat winlogbeat metricbeat heartbeat auditbeat
BEATS?=auditbeat filebeat heartbeat journalbeat metricbeat packetbeat winlogbeat x-pack/functionbeat
PROJECTS=libbeat $(BEATS)
PROJECTS_ENV=libbeat filebeat metricbeat
SNAPSHOT?=yes
PYTHON_ENV?=$(BUILD_DIR)/python-env
VIRTUALENV_PARAMS?=
FIND=find . -type f -not -path "*/vendor/*" -not -path "*/build/*" -not -path "*/.git/*"
@ -12,6 +11,7 @@ GOLINT_REPO=github.com/golang/lint/golint
REVIEWDOG=reviewdog
REVIEWDOG_OPTIONS?=-diff "git diff master"
REVIEWDOG_REPO=github.com/haya14busa/reviewdog/cmd/reviewdog
XPACK_SUFFIX=x-pack/
# Runs complete testsuites (unit, system, integration) for all beats with coverage and race detection.
# Also it builds the docs and the generators
@ -62,6 +62,7 @@ clean:
@rm -rf build
@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) clean || exit 1;)
@$(MAKE) -C generator clean
@-mage -clean 2> /dev/null
# Cleans up the vendor directory from unnecessary files
# This should always be run after updating the dependencies
@ -71,25 +72,42 @@ clean-vendor:
.PHONY: check
check: python-env
@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) check || exit 1;)
@$(foreach var,$(PROJECTS) dev-tools,$(MAKE) -C $(var) check || exit 1;)
@# Checks also python files which are not part of the beats
@$(FIND) -name *.py -exec $(PYTHON_ENV)/bin/autopep8 -d --max-line-length 120 {} \; | (! grep . -q) || (echo "Code differs from autopep8's style" && false)
@# Validate that all updates were committed
@$(MAKE) update
@$(MAKE) check-headers
@git diff | cat
@git update-index --refresh
@git diff-index --exit-code HEAD --
.PHONY: check-headers
check-headers:
@go get github.com/elastic/go-licenser
@go-licenser -d -exclude x-pack
@go-licenser -d -license Elastic x-pack
.PHONY: add-headers
add-headers:
@go get github.com/elastic/go-licenser
@go-licenser -exclude x-pack
@go-licenser -license Elastic x-pack
# Corrects spelling errors
.PHONY: misspell
misspell:
go get github.com/client9/misspell
go get -u github.com/client9/misspell/cmd/misspell
# Ignore Kibana files (.json)
$(FIND) -not -path "*.json" -name '*' -exec misspell -w {} \;
$(FIND) \
-not -path "*.json" \
-not -path "*.log" \
-name '*' \
-exec misspell -w {} \;
.PHONY: fmt
fmt: python-env
@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) fmt || exit 1;)
fmt: add-headers python-env
@$(foreach var,$(PROJECTS) dev-tools,$(MAKE) -C $(var) fmt || exit 1;)
@# Cleans also python files which are not part of the beats
@$(FIND) -name "*.py" -exec $(PYTHON_ENV)/bin/autopep8 --in-place --max-line-length 120 {} \;
@ -98,51 +116,12 @@ lint:
@go get $(GOLINT_REPO) $(REVIEWDOG_REPO)
$(REVIEWDOG) $(REVIEWDOG_OPTIONS)
# Collects all dashboards and generates dashboard folder for https://github.com/elastic/beats-dashboards/tree/master/dashboards
.PHONY: beats-dashboards
beats-dashboards:
@mkdir -p build/dashboards
@$(foreach var,$(BEATS),cp -r $(var)/_meta/kibana/ build/dashboards/$(var) || exit 1;)
# Builds the documents for each beat
.PHONY: docs
docs:
@$(foreach var,$(PROJECTS),BUILD_DIR=${BUILD_DIR} $(MAKE) -C $(var) docs || exit 1;)
sh ./script/build_docs.sh dev-guide github.com/elastic/beats/docs/devguide ${BUILD_DIR}
.PHONY: package-all
package-all: update beats-dashboards
@$(foreach var,$(BEATS),SNAPSHOT=$(SNAPSHOT) $(MAKE) -C $(var) package-all || exit 1;)
@echo "Start building the dashboards package"
@mkdir -p build/upload/
@BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${BUILD_DIR}/upload SNAPSHOT=$(SNAPSHOT) $(MAKE) -C dev-tools/packer package-dashboards ${BUILD_DIR}/upload/build_id.txt
@mv build/upload build/dashboards-upload
@# Copy build files over to top build directory
@mkdir -p build/upload/
@$(foreach var,$(BEATS),cp -r $(var)/build/upload/ build/upload/$(var) || exit 1;)
@cp -r build/dashboards-upload build/upload/dashboards
@# Run tests on the generated packages.
@go test ./dev-tools/package_test.go -files "${BUILD_DIR}/upload/*/*"
# Upload nightly builds to S3
.PHONY: upload-nightlies-s3
upload-nightlies-s3: all
aws s3 cp --recursive --acl public-read build/upload s3://beats-nightlies
# Run after building to sign packages and publish to APT and YUM repos.
.PHONY: package-upload
upload-package:
$(MAKE) -C dev-tools/packer deb-rpm-s3
# You must export AWS_ACCESS_KEY=<AWS access> and export AWS_SECRET_KEY=<secret>
# before running this make target.
dev-tools/packer/docker/deb-rpm-s3/deb-rpm-s3.sh
.PHONY: release-upload
upload-release:
aws s3 cp --recursive --acl public-read build/upload s3://download.elasticsearch.org/beats/
.PHONY: notice
notice: python-env
@echo "Generating NOTICE"
@ -152,7 +131,7 @@ notice: python-env
.PHONY: python-env
python-env:
@test -d $(PYTHON_ENV) || virtualenv $(VIRTUALENV_PARAMS) $(PYTHON_ENV)
@$(PYTHON_ENV)/bin/pip install -q --upgrade pip autopep8 six
@$(PYTHON_ENV)/bin/pip install -q --upgrade pip autopep8==1.3.5 six
@# Work around pip bug. See: https://github.com/pypa/pip/issues/4464
@find $(PYTHON_ENV) -type d -name dist-packages -exec sh -c "echo dist-packages > {}.pth" ';'
@ -160,3 +139,40 @@ python-env:
.PHONY: test-apm
test-apm:
sh ./script/test_apm.sh
### Packaging targets ####
# Builds a snapshot release.
.PHONY: snapshot
snapshot:
@$(MAKE) SNAPSHOT=true release
# Builds a release.
.PHONY: release
release: beats-dashboards
@$(foreach var,$(BEATS),$(MAKE) -C $(var) release || exit 1;)
@$(foreach var,$(BEATS), \
test -d $(var)/build/distributions && test -n "$$(ls $(var)/build/distributions)" || exit 0; \
mkdir -p build/distributions/$(subst $(XPACK_SUFFIX),'',$(var)) && mv -f $(var)/build/distributions/* build/distributions/$(subst $(XPACK_SUFFIX),'',$(var))/ || exit 1;)
# Builds a snapshot release. The Go version defined in .go-version will be
# installed and used for the build.
.PHONY: release-manager-snapshot
release-manager-snapshot:
@$(MAKE) SNAPSHOT=true release-manager-release
# Builds a snapshot release. The Go version defined in .go-version will be
# installed and used for the build.
.PHONY: release-manager-release
release-manager-release:
./dev-tools/run_with_go_ver $(MAKE) release
# Installs the mage build tool from the vendor directory.
.PHONY: mage
mage:
@go install github.com/elastic/beats/vendor/github.com/magefile/mage
# Collects dashboards from all Beats and generates a zip file distribution.
.PHONY: beats-dashboards
beats-dashboards: mage update
@mage packageBeatDashboards

View File

@ -116,11 +116,98 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------
Dependency: github.com/boltdb/bolt
Version: v1.3.1
Revision: 2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8
Dependency: github.com/aws/aws-lambda-go
Version: =v1.6.0
Revision: 2d482ef09017ae953b1e8d5a6ddac5b696663a3c
License type (autodetected): Apache-2.0
./vendor/github.com/aws/aws-lambda-go/LICENSE:
--------------------------------------------------------------------
Apache License 2.0
--------------------------------------------------------------------
Dependency: github.com/aws/aws-lambda-go
Version: =v1.6.0
Revision: 2d482ef09017ae953b1e8d5a6ddac5b696663a3c
License type (autodetected): MIT
./vendor/github.com/boltdb/bolt/LICENSE:
./vendor/github.com/aws/aws-lambda-go/LICENSE-LAMBDACODE:
--------------------------------------------------------------------
MIT No Attribution
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------
Dependency: github.com/aws/aws-sdk-go-v2
Version: v2.0.0-preview.5
Revision: d52522b5f4b95591ff6528d7c54923951aadf099
License type (autodetected): Apache-2.0
./vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt:
--------------------------------------------------------------------
Apache License 2.0
-------NOTICE.txt-----
AWS SDK for Go
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2014-2015 Stripe, Inc.
--------------------------------------------------------------------
Dependency: github.com/awslabs/goformation
Revision: c5f655c41370a0bec733fa251fd2ecd4e3f0dfe3
License type (autodetected): Apache-2.0
./vendor/github.com/awslabs/goformation/LICENSE:
--------------------------------------------------------------------
Apache License 2.0
-------NOTICE-----
GoFormation
Copyright 2011-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
--------------------------------------------------------------------
Dependency: github.com/bsm/sarama-cluster
Revision: 7e67d87a6b3f83fe08c096fd084691bd9dca112f
License type (autodetected): MIT
./vendor/github.com/bsm/sarama-cluster/LICENSE:
--------------------------------------------------------------------
(The MIT License)
Copyright (c) 2017 Black Square Media Ltd
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------
Dependency: github.com/coreos/bbolt
Revision: af9db2027c98c61ecd8e17caa5bd265792b9b9a2
License type (autodetected): MIT
./vendor/github.com/coreos/bbolt/LICENSE:
--------------------------------------------------------------------
The MIT License (MIT)
@ -143,6 +230,37 @@ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------
Dependency: github.com/coreos/go-systemd
Version: v17
Revision: 39ca1b05acc7ad1220e09f133283b8859a8b71ab
License type (autodetected): Apache-2.0
./vendor/github.com/coreos/go-systemd/LICENSE:
--------------------------------------------------------------------
Apache License 2.0
-------NOTICE-----
CoreOS Project
Copyright 2018 CoreOS, Inc
This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
--------------------------------------------------------------------
Dependency: github.com/coreos/pkg
Revision: 97fdf19511ea361ae1c100dd393cc47f8dcfa1e1
License type (autodetected): Apache-2.0
./vendor/github.com/coreos/pkg/LICENSE:
--------------------------------------------------------------------
Apache License 2.0
-------NOTICE-----
CoreOS Project
Copyright 2014 CoreOS, Inc
This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
--------------------------------------------------------------------
Dependency: github.com/davecgh/go-spew
Version: v1.1.0
@ -373,8 +491,8 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------
Dependency: github.com/elastic/go-libaudit
Version: v0.2.1
Revision: 55225d06b15c74082f9a7af75aa4284dbe48d20a
Version: v0.4.0
Revision: 39073a2988f718067d85d27a4d18b1b57de5d947
License type (autodetected): Apache-2.0
./vendor/github.com/elastic/go-libaudit/LICENSE.txt:
--------------------------------------------------------------------
@ -396,10 +514,25 @@ License type (autodetected): Apache-2.0
Apache License 2.0
--------------------------------------------------------------------
Dependency: github.com/elastic/go-seccomp-bpf
Revision: 5bed103a93993f24c58dd267faaab582f2373e31
License type (autodetected): Apache-2.0
./vendor/github.com/elastic/go-seccomp-bpf/LICENSE.txt:
--------------------------------------------------------------------
Apache License 2.0
-------NOTICE.txt-----
Elastic go-seccomp-bpf
Copyright 2018 Elasticsearch B.V.
This product includes software developed at
Elasticsearch, B.V. (https://www.elastic.co/).
--------------------------------------------------------------------
Dependency: github.com/elastic/go-structform
Version: v0.0.3
Revision: 0a66add879601f69f55663f4c913c72988218982
Version: v0.0.5
Revision: 1425975cf4eb470099fcf02cbe9389cf3a7028a3
License type (autodetected): Apache-2.0
./vendor/github.com/elastic/go-structform/LICENSE:
--------------------------------------------------------------------
@ -408,7 +541,7 @@ Apache License 2.0
--------------------------------------------------------------------
Dependency: github.com/elastic/go-sysinfo
Revision: fda017eee28b7420d931e08c7361c1c17f516aa2
Revision: 7b021494a9562d0c3f0422d49b9980709c5650e9
License type (autodetected): Apache-2.0
./vendor/github.com/elastic/go-sysinfo/LICENSE.txt:
--------------------------------------------------------------------
@ -423,8 +556,8 @@ Elasticsearch, B.V. (https://www.elastic.co/).
--------------------------------------------------------------------
Dependency: github.com/elastic/go-txfile
Version: v0.0.1
Revision: 7e7e33cc236f30fff545f3ee2c35ada5b70b6b13
Version: v0.0.3
Revision: 389b527ad365f6fc6cf5fa7e0ba5a2294ad2f3ed
License type (autodetected): Apache-2.0
./vendor/github.com/elastic/go-txfile/LICENSE:
--------------------------------------------------------------------
@ -433,8 +566,8 @@ Apache License 2.0
--------------------------------------------------------------------
Dependency: github.com/elastic/go-ucfg
Version: v0.5.1
Revision: 0ba28e36add27704e6b49a7ed8557989a8f4a635
Version: v0.6.5
Revision: 92d43887f91851c9936621665af7f796f4d03412
License type (autodetected): Apache-2.0
./vendor/github.com/elastic/go-ucfg/LICENSE:
--------------------------------------------------------------------
@ -443,7 +576,8 @@ Apache License 2.0
--------------------------------------------------------------------
Dependency: github.com/elastic/go-windows
Revision: a730c8b4e08aef7e1ebb642928bf862996ad2383
Version: 7b021494a9562d0c3f0422d49b9980709c5650e9
Revision: bb1581babc04d5cb29a2bfa7a9ac6781c730c8dd
License type (autodetected): Apache-2.0
./vendor/github.com/elastic/go-windows/LICENSE.txt:
--------------------------------------------------------------------
@ -477,8 +611,8 @@ subcomponents is subject to the terms and conditions of the
subcomponent's license, as noted in the LICENSE file.
--------------------------------------------------------------------
Dependency: github.com/ericchiang/k8s
Version: v1.0.0
Revision: 5912993f00cb7c971aaa54529a06bd3eecd6c3d4
Version: =v1.0.0/in-cluster-ipv6
Revision: 33b346590d1dd4eaac217471671f736bcdab492d
License type (autodetected): Apache-2.0
./vendor/github.com/ericchiang/k8s/LICENSE:
--------------------------------------------------------------------
@ -515,7 +649,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------
Dependency: github.com/fsnotify/fsevents
Revision: 70114c7d2e1e4d1ae5179b285d65ea21aae111cc
Revision: e1d381a4d27063baac2e9d3c5887ceb4ab059287
License type (autodetected): BSD-3-Clause
./vendor/github.com/fsnotify/fsevents/LICENSE:
--------------------------------------------------------------------
@ -648,6 +782,14 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------
Dependency: github.com/go-ini/ini
License type (autodetected): Apache-2.0
./vendor/github.com/go-ini/ini/LICENSE:
--------------------------------------------------------------------
Apache License 2.0
--------------------------------------------------------------------
Dependency: github.com/go-ole/go-ole
Revision: de8695c8edbf8236f30d6e1376e20b198a028d42
@ -1069,6 +1211,77 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------
Dependency: github.com/gofrs/uuid
Version: 3.1.1
Revision: 47cd1dca1a6e7f807d5a492bd7e7f41d0855b5a1
License type (autodetected): MIT
./vendor/github.com/gofrs/uuid/LICENSE:
--------------------------------------------------------------------
Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------
Dependency: github.com/gogo/protobuf
Revision: 636bf0302bc95575d69441b25a2603156ffdddf1
License type (autodetected): BSD-3-Clause
./metricbeat/vendor/github.com/gogo/protobuf/LICENSE:
--------------------------------------------------------------------
Protocol Buffers for Go with Gadgets
Copyright (c) 2013, The GoGo Authors. All rights reserved.
http://github.com/gogo/protobuf
Go support for Protocol Buffers - Google's data interchange format
Copyright 2010 The Go Authors. All rights reserved.
https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------
Dependency: github.com/golang/protobuf
Revision: 2bba0603135d7d7f5cb73b2125beeda19c09f4ef
@ -1231,6 +1444,41 @@ License type (autodetected): Apache-2.0
Apache License 2.0
--------------------------------------------------------------------
Dependency: github.com/imdario/mergo
Revision: 9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4
License type (autodetected): BSD-3-Clause
./vendor/github.com/imdario/mergo/LICENSE:
--------------------------------------------------------------------
Copyright (c) 2013 Dario Castañé. All rights reserved.
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------
Dependency: github.com/inconshreveable/mousetrap
Revision: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
@ -1240,6 +1488,52 @@ License type (autodetected): Apache-2.0
Apache License 2.0
--------------------------------------------------------------------
Dependency: github.com/insomniacslk/dhcp
Revision: 633285ba52b2a67b98a3026eb87ee1a76ab60f3c
License type (autodetected): BSD-3-Clause
./vendor/github.com/insomniacslk/dhcp/LICENSE:
--------------------------------------------------------------------
BSD 3-Clause License
Copyright (c) 2018, Andrea Barberio
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------
Dependency: github.com/jmespath/go-jmespath
Version: 0.2.2
Revision: 3433f3ea46d9f8019119e7dd41274e112a2359a9
License type (autodetected): Apache-2.0
./vendor/github.com/jmespath/go-jmespath/LICENSE:
--------------------------------------------------------------------
Apache License 2.0
--------------------------------------------------------------------
Dependency: github.com/joeshaw/multierror
Revision: 69b34d4ec901851247ae7e77d33909caf9df99ed
@ -1268,6 +1562,33 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
--------------------------------------------------------------------
Dependency: github.com/jstemmer/go-junit-report
Revision: 385fac0ced9acaae6dc5b39144194008ded00697
License type (autodetected): MIT
./vendor/github.com/jstemmer/go-junit-report/LICENSE:
--------------------------------------------------------------------
Copyright (c) 2012 Joel Stemmer
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------
Dependency: github.com/klauspost/compress
Revision: 14c9a76e3c95e47f8ccce949bba2c1101a8b85e6
@ -1366,6 +1687,16 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------
Dependency: github.com/kubernetes/apimachinery
Version: kubernetes-1.11.1
Revision: 103fd098999dc9c0c88536f5c9ad2e5da39373ae
License type (autodetected): Apache-2.0
./metricbeat/vendor/github.com/kubernetes/apimachinery/LICENSE:
--------------------------------------------------------------------
Apache License 2.0
--------------------------------------------------------------------
Dependency: github.com/lib/pq
Revision: 2704adc878c21e1329f46f6e56a1c387d788ff94
@ -1381,6 +1712,15 @@ The above copyright notice and this permission notice shall be included in all c
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------
Dependency: github.com/magefile/mage
Revision: 5e51f9ad1ed0886c5d06a8c46a09703cfc4d9034
License type (autodetected): Apache-2.0
./vendor/github.com/magefile/mage/LICENSE:
--------------------------------------------------------------------
Apache License 2.0
--------------------------------------------------------------------
Dependency: github.com/mattn/go-colorable
Revision: 941b50ebc6efddf4c41c8e4537a5f68a4e686b24
@ -1467,7 +1807,8 @@ SOFTWARE.
--------------------------------------------------------------------
Dependency: github.com/miekg/dns
Revision: 5d001d020961ae1c184f9f8152fdc73810481677
Version: v1.0.8
Revision: 5a2b9fab83ff0f8bfc99684bd5f43a37abe560f1
License type (autodetected): BSD-3-Clause
./vendor/github.com/miekg/dns/LICENSE:
--------------------------------------------------------------------
@ -1560,6 +1901,15 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
--------------------------------------------------------------------
Dependency: github.com/OneOfOne/xxhash
Revision: 74ace4fe5525ef62ce28d5093d6b0faaa6a575f3
License type (autodetected): Apache-2.0
./vendor/github.com/OneOfOne/xxhash/LICENSE:
--------------------------------------------------------------------
Apache License 2.0
--------------------------------------------------------------------
Dependency: github.com/opencontainers/go-digest
Revision: eaa60544f31ccf3b0653b1a118b76d33418ff41b
@ -1901,36 +2251,115 @@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------
Dependency: github.com/satori/go.uuid
Revision: 5bf94b69c6b68ee1b541973bb8e1144db23a194b
License type (autodetected): MIT
./vendor/github.com/satori/go.uuid/LICENSE:
Dependency: github.com/sanathkr/go-yaml
Version: v2
Revision: ed9d249f429b3f5a69f80a7abef6bfce81fef894
License type (autodetected): Apache-2.0
./vendor/github.com/sanathkr/go-yaml/LICENSE:
--------------------------------------------------------------------
Copyright (C) 2013-2016 by Maxim Bublis <b@codemonkey.ru>
Apache License 2.0
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
--------------------------------------------------------------------
Dependency: github.com/sanathkr/go-yaml
Version: v2
Revision: ed9d249f429b3f5a69f80a7abef6bfce81fef894
License type (autodetected): MIT
./vendor/github.com/sanathkr/go-yaml/LICENSE.libyaml:
--------------------------------------------------------------------
The following files were ported to Go from C files of libyaml, and thus
are still covered by their original copyright and license:
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
apic.go
emitterc.go
parserc.go
readerc.go
scannerc.go
writerc.go
yamlh.go
yamlprivateh.go
Copyright (c) 2006 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------
Dependency: github.com/sanathkr/yaml
Revision: 0056894fa522748ca336761ffeeeb6bbae654d07
License type (autodetected): MIT
./vendor/github.com/sanathkr/yaml/LICENSE:
--------------------------------------------------------------------
The MIT License (MIT)
Copyright (c) 2014 Sam Ghods
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------
Dependency: github.com/shirou/gopsutil
Version: v2.18.01
Revision: c432be29ccce470088d07eea25b3ea7e68a8afbb
Version: v2.18.06
Revision: 4a180b209f5f494e5923cfce81ea30ba23915877
License type (autodetected): BSD-3-Clause
./vendor/github.com/shirou/gopsutil/LICENSE:
--------------------------------------------------------------------
@ -1997,12 +2426,12 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------
Dependency: github.com/Shopify/sarama
Version: v1.12/enh/offset-replica-id
Revision: c292021939f5aba53b3ffc2cb09c7aadb32a42df
Version: =v1.18.0/enh/offset-replica-id
Revision: 0143592836b090a1b481def4d902cfb3c5c05ae5
License type (autodetected): MIT
./vendor/github.com/Shopify/sarama/LICENSE:
--------------------------------------------------------------------
Copyright (c) 2013 Evan Huus
Copyright (c) 2013 Shopify
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@ -2124,14 +2553,14 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------
Dependency: github.com/stretchr/objx
Revision: b8b73a35e9830ae509858c10dec5866b4d5c8bff
License type (autodetected): MIT
./vendor/github.com/stretchr/objx/LICENSE.md:
./vendor/github.com/stretchr/objx/LICENSE:
--------------------------------------------------------------------
objx - by Mat Ryer and Tyler Bunnell
The MIT License (MIT)
The MIT License
Copyright (c) 2014 Stretchr, Inc.
Copyright (c) 2017-2018 objx contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -2153,8 +2582,8 @@ SOFTWARE.
--------------------------------------------------------------------
Dependency: github.com/stretchr/testify
Version: v1.2.0
Revision: b91bfb9ebec76498946beb6af7c0230c7cc7ba6c
Version: v1.2.2
Revision: f35b8ab0b5a2cef36673838d662e249dd9c94686
License type (autodetected): MIT
./vendor/github.com/stretchr/testify/LICENSE:
--------------------------------------------------------------------
@ -2490,7 +2919,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------
Dependency: golang.org/x/sys
Revision: 37707fdb30a5b38865cfb95e5aab41707daec7fd
Revision: 0ffbfd41fbef8ffcf9b62b0b0aa3a5873ed7a4fe
License type (autodetected): BSD-3-Clause
./vendor/golang.org/x/sys/LICENSE:
--------------------------------------------------------------------
@ -2592,8 +3021,8 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------
Dependency: golang.org/x/tools
Version: release-branch.go1.9
Revision: 5d2fd3ccab986d52112bf301d47a819783339d0e
Version: release-branch.go1.10
Revision: 156d532d4f67148ceab07c3b59ed7fa13bdbf00c
License type (autodetected): BSD-3-Clause
./vendor/golang.org/x/tools/LICENSE:
--------------------------------------------------------------------
@ -2660,6 +3089,41 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------
Dependency: gopkg.in/inf.v0
Revision: d2d2541c53f18d2a059457998ce2876cc8e67cbf
License type (autodetected): BSD-3-Clause
./metricbeat/vendor/gopkg.in/inf.v0/LICENSE:
--------------------------------------------------------------------
Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go
Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------
Dependency: gopkg.in/mgo.v2
Revision: 3f83fa5005286a7fe593b055f0d7771a7dce4655
@ -2761,40 +3225,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------
Dependency: gopkg.in/yaml.v2
Revision: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
License type (autodetected): MIT
./vendor/gopkg.in/yaml.v2/LICENSE.libyaml:
License type (autodetected): Apache-2.0
./vendor/gopkg.in/yaml.v2/LICENSE:
--------------------------------------------------------------------
The following files were ported to Go from C files of libyaml, and thus
are still covered by their original copyright and license:
Apache License 2.0
apic.go
emitterc.go
parserc.go
readerc.go
scannerc.go
writerc.go
yamlh.go
yamlprivateh.go
Copyright (c) 2006 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------
Dependency: gopkg.in/yaml.v2

View File

@ -11,7 +11,7 @@
# Usage and Features:
# - Two users exist: Administrator and Vagrant. Both have the password: vagrant
# - Use 'vagrant ssh' to open a Windows command prompt.
# - Use 'vagrant rdp' to open a Windows Remote Deskop session. Mac users must
# - Use 'vagrant rdp' to open a Windows Remote Desktop session. Mac users must
# install the Microsoft Remote Desktop Client from the App Store.
# - There is a desktop shortcut labeled "Beats Shell" that opens a command prompt
# to C:\Gopath\src\github.com\elastic\beats where the code is mounted.
@ -26,6 +26,8 @@
# - Folder syncing doesn't work well. Consider copying the files into the box or
# cloning the project inside the box.
GO_VERSION = File.read(File.join(File.dirname(__FILE__), ".go-version")).strip
# Provisioning for Windows PowerShell
$winPsProvision = <<SCRIPT
echo 'Creating github.com\elastic in the GOPATH'
@ -33,17 +35,28 @@ New-Item -itemtype directory -path "C:\\Gopath\\src\\github.com\\elastic" -force
echo "Symlinking C:\\Vagrant to C:\\Gopath\\src\\github.com\\elastic"
cmd /c mklink /d C:\\Gopath\\src\\github.com\\elastic\\beats \\\\vboxsvr\\vagrant
echo "Installing gvm to manage go version"
[Net.ServicePointManager]::SecurityProtocol = "tls12"
Invoke-WebRequest -URI https://github.com/andrewkroh/gvm/releases/download/v0.0.5/gvm-windows-amd64.exe -Outfile C:\Windows\System32\gvm.exe
C:\Windows\System32\gvm.exe --format=powershell #{GO_VERSION} | Invoke-Expression
go version
echo "Configure environment variables"
[System.Environment]::SetEnvironmentVariable("GOROOT", "C:\\Users\\vagrant\\.gvm\\versions\\go#{GO_VERSION}.windows.amd64", [System.EnvironmentVariableTarget]::Machine)
[System.Environment]::SetEnvironmentVariable("PATH", "$env:GOROOT\\bin;$env:PATH", [System.EnvironmentVariableTarget]::Machine)
echo "Creating Beats Shell desktop shortcut"
$WshShell = New-Object -comObject WScript.Shell
$Shortcut = $WshShell.CreateShortcut("$Home\\Desktop\\Beats Shell.lnk")
$Shortcut.TargetPath = "cmd.exe"
$Shortcut.Arguments = "/K cd /d C:\\Gopath\\src\\github.com\\elastic\\beats"
$Shortcut.Arguments = '/c "SET GOROOT=C:\\Users\\vagrant\\.gvm\\versions\\go#{GO_VERSION}.windows.amd64&PATH=C:\\Users\\vagrant\\.gvm\\versions\\go#{GO_VERSION}.windows.amd64\\bin;%PATH%" && START'
$Shortcut.WorkingDirectory = "C:\\Gopath\\src\\github.com\\elastic\\beats"
$Shortcut.Save()
echo "Disable automatic updates"
$AUSettigns = (New-Object -com "Microsoft.Update.AutoUpdate").Settings
$AUSettigns.NotificationLevel = 1
$AUSettigns.Save()
$AUSettings = (New-Object -com "Microsoft.Update.AutoUpdate").Settings
$AUSettings.NotificationLevel = 1
$AUSettings.Save()
SCRIPT
# Provisioning for Unix/Linux
@ -63,7 +76,7 @@ if [ ! -e "~/bin/gvm" ]; then
chmod +x ~/bin/gvm
echo 'export GOPATH=$HOME/go' >> ~/.bash_profile
echo 'export PATH=$HOME/bin:$GOPATH/bin:$PATH' >> ~/.bash_profile
echo 'eval "$(gvm 1.9.4)"' >> ~/.bash_profile
echo 'eval "$(gvm #{GO_VERSION})"' >> ~/.bash_profile
fi
SCRIPT
@ -132,8 +145,19 @@ Vagrant.configure(2) do |config|
c.vm.synced_folder ".", "/vagrant", type: "virtualbox"
end
config.vm.define "fedora26", primary: true do |c|
c.vm.box = "bento/fedora-26"
config.vm.define "centos6", primary: true do |c|
c.vm.box = "bento/centos-6.9"
c.vm.network :forwarded_port, guest: 22, host: 2229, id: "ssh", auto_correct: true
c.vm.provision "shell", inline: $unixProvision, privileged: false
c.vm.provision "shell", inline: $linuxGvmProvision, privileged: false
c.vm.provision "shell", inline: "yum install -y make gcc python-pip python-virtualenv git"
c.vm.synced_folder ".", "/vagrant", type: "virtualbox"
end
config.vm.define "fedora27", primary: true do |c|
c.vm.box = "bento/fedora-27"
c.vm.network :forwarded_port, guest: 22, host: 2227, id: "ssh", auto_correct: true
c.vm.provision "shell", inline: $unixProvision, privileged: false
@ -143,6 +167,49 @@ Vagrant.configure(2) do |config|
c.vm.synced_folder ".", "/vagrant", type: "virtualbox"
end
config.vm.define "archlinux", primary: true do |c|
c.vm.box = "archlinux/archlinux"
c.vm.network :forwarded_port, guest: 22, host: 2228, id: "ssh", auto_correct: true
c.vm.provision "shell", inline: $unixProvision, privileged: false
c.vm.provision "shell", inline: $linuxGvmProvision, privileged: false
c.vm.provision "shell", inline: "pacman -Sy && pacman -S --noconfirm make gcc python-pip python-virtualenv git"
c.vm.synced_folder ".", "/vagrant", type: "virtualbox"
end
config.vm.define "ubuntu1804", primary: true do |c|
c.vm.box = "ubuntu/bionic64"
c.vm.network :forwarded_port, guest: 22, host: 2229, id: "ssh", auto_correct: true
c.vm.provision "shell", inline: $unixProvision, privileged: false
c.vm.provision "shell", inline: $linuxGvmProvision, privileged: false
c.vm.provision "shell", inline: "apt-get update && apt-get install -y make gcc python-pip python-virtualenv git"
c.vm.synced_folder ".", "/vagrant", type: "virtualbox"
end
config.vm.define "sles12", primary: true do |c|
c.vm.box = "elastic/sles-12-x86_64"
c.vm.network :forwarded_port, guest: 22, host: 2230, id: "ssh", auto_correct: true
c.vm.provision "shell", inline: $unixProvision, privileged: false
c.vm.provision "shell", inline: $linuxGvmProvision, privileged: false
c.vm.provision "shell", inline: "pip install virtualenv"
c.vm.synced_folder ".", "/vagrant", type: "virtualbox"
end
# Windows Server 2016
config.vm.define "win2016", primary: true do |machine|
machine.vm.box = "elastic/windows-2016-x86_64"
machine.vm.provision "shell", inline: $winPsProvision
machine.vm.provider "virtualbox" do |v|
v.memory = 4096
end
end
end
# -*- mode: ruby -*-

View File

@ -1,5 +1,4 @@
build
_meta/kibana
_meta/beat.yml
_meta/beat.reference.yml
module/*/_meta/config.yml

View File

@ -1,4 +1,4 @@
FROM golang:1.9.2
FROM golang:1.10.6
MAINTAINER Nicolas Ruflin <ruflin@elastic.co>
RUN set -x && \

View File

@ -1,78 +1,26 @@
BEAT_NAME=auditbeat
BEAT_TITLE=Auditbeat
BEAT_DESCRIPTION=Audit the activities of users and processes on your system.
SYSTEM_TESTS=true
TEST_ENVIRONMENT?=true
GOX_OS?=linux windows ## @Building List of all OS to be supported by "make crosscompile".
DEV_OS?=linux
TESTING_ENVIRONMENT?=latest
ES_BEATS?=..
# Path to the libbeat Makefile
include ${ES_BEATS}/libbeat/scripts/Makefile
# This is called by the beats packer before building starts
.PHONY: before-build
before-build:
@cat ${ES_BEATS}/auditbeat/_meta/common.p1.yml \
<(go run scripts/generate_config.go -os windows -concat) \
${ES_BEATS}/auditbeat/_meta/common.p2.yml \
${ES_BEATS}/libbeat/_meta/config.yml > \
${PREFIX}/${BEAT_NAME}-win.yml
@cat ${ES_BEATS}/auditbeat/_meta/common.reference.yml \
<(go run scripts/generate_config.go -os windows -concat -ref) \
${ES_BEATS}/libbeat/_meta/config.reference.yml > \
${PREFIX}/${BEAT_NAME}-win.reference.yml
@cat ${ES_BEATS}/auditbeat/_meta/common.p1.yml \
<(go run scripts/generate_config.go -os darwin -concat) \
${ES_BEATS}/auditbeat/_meta/common.p2.yml \
${ES_BEATS}/libbeat/_meta/config.yml > \
${PREFIX}/${BEAT_NAME}-darwin.yml
@cat ${ES_BEATS}/auditbeat/_meta/common.reference.yml \
<(go run scripts/generate_config.go -os darwin -concat -ref) \
${ES_BEATS}/libbeat/_meta/config.reference.yml > \
${PREFIX}/${BEAT_NAME}-darwin.reference.yml
@cat ${ES_BEATS}/auditbeat/_meta/common.p1.yml \
<(go run scripts/generate_config.go -os linux -arch amd64 -concat) \
${ES_BEATS}/auditbeat/_meta/common.p2.yml \
${ES_BEATS}/libbeat/_meta/config.yml > \
${PREFIX}/${BEAT_NAME}-linux.yml
@cat ${ES_BEATS}/auditbeat/_meta/common.reference.yml \
<(go run scripts/generate_config.go -os linux -concat -ref) \
${ES_BEATS}/libbeat/_meta/config.reference.yml > \
${PREFIX}/${BEAT_NAME}-linux.reference.yml
@cat ${ES_BEATS}/auditbeat/_meta/common.p1.yml \
<(go run scripts/generate_config.go -os linux -arch i386 -concat) \
${ES_BEATS}/auditbeat/_meta/common.p2.yml \
${ES_BEATS}/libbeat/_meta/config.yml > \
${PREFIX}/${BEAT_NAME}-linux-386.yml
@cat ${ES_BEATS}/auditbeat/_meta/common.reference.yml \
<(go run scripts/generate_config.go -os linux -concat -ref) \
${ES_BEATS}/libbeat/_meta/config.reference.yml > \
${PREFIX}/${BEAT_NAME}-linux-386.reference.yml
# Collects all dependencies and then calls update
.PHONY: collect
collect: fields collect-docs configs kibana
# Collects all module and metricset fields
.PHONY: fields
fields: python-env
@mkdir -p _meta
@cp _meta/fields.common.yml _meta/fields.generated.yml
@${PYTHON_ENV}/bin/python ${ES_BEATS}/metricbeat/scripts/fields_collector.py >> _meta/fields.generated.yml
collect: collect-docs configs kibana
# Collects all module configs
.PHONY: configs
configs: python-env
@cat ${ES_BEATS}/auditbeat/_meta/common.p1.yml \
<(go run scripts/generate_config.go -os ${DEV_OS} -concat) \
${ES_BEATS}/auditbeat/_meta/common.p2.yml > _meta/beat.yml
<(go run scripts/generate_config.go -os ${DEV_OS} -concat) \
${ES_BEATS}/auditbeat/_meta/common.p2.yml > _meta/beat.yml
@cat ${ES_BEATS}/auditbeat/_meta/common.reference.yml \
<(go run scripts/generate_config.go -os ${DEV_OS} -ref -concat) > _meta/beat.reference.yml
<(go run scripts/generate_config.go -os ${DEV_OS} -ref -concat) > _meta/beat.reference.yml
# Collects all module docs
.PHONY: collect-docs
@ -85,7 +33,6 @@ collect-docs: python-env
# Collects all module dashboards
.PHONY: kibana
kibana:
@-rm -rf _meta/kibana
@mkdir -p _meta/kibana
@-cp -pr module/*/_meta/kibana _meta/
@-rm -rf _meta/kibana.generated
@mkdir -p _meta/kibana.generated
@-cp -pr module/*/_meta/kibana/* _meta/kibana.generated

View File

@ -38,6 +38,8 @@ auditbeat.modules:
rate_limit: 0
include_raw_message: false
include_warnings: false
# Load audit rules from separate files. Same format as audit.rules(7).
audit_rule_files: [ '${path.config}/audit.rules.d/*.conf' ]
audit_rules: |
## Define audit rules here.
## Create file watches (-w) or syscall audits (-a or -A). Uncomment these
@ -96,7 +98,7 @@ auditbeat.modules:
# Hash types to compute when the file changes. Supported types are
# blake2b_256, blake2b_384, blake2b_512, md5, sha1, sha224, sha256, sha384,
# sha512, sha512_224, sha512_256, sha3_224, sha3_256, sha3_384 and sha3_512.
# sha512, sha512_224, sha512_256, sha3_224, sha3_256, sha3_384, sha3_512, and xxh64.
# Default is sha1.
hash_types: [sha1]
@ -221,8 +223,8 @@ auditbeat.modules:
#
# event -> filter1 -> event1 -> filter2 ->event2 ...
#
# The supported processors are drop_fields, drop_event, include_fields, and
# add_cloud_metadata.
# The supported processors are drop_fields, drop_event, include_fields,
# decode_json_fields, and add_cloud_metadata.
#
# For example, you can use the following processors to keep the fields that
# contain CPU load percentages, but remove the fields that contain CPU ticks
@ -250,6 +252,14 @@ auditbeat.modules:
# - from: "a"
# to: "b"
#
# The following example tokenizes the string into fields:
#
#processors:
#- dissect:
# tokenizer: "%{key1} - %{key2}"
# field: "message"
# target_prefix: "dissect"
#
# The following example enriches each event with metadata from the cloud
# provider about the host machine. It works on EC2, GCE, DigitalOcean,
# Tencent Cloud, and Alibaba Cloud.
@ -288,7 +298,31 @@ auditbeat.modules:
#
#processors:
#- add_docker_metadata: ~
#- add_host_metadata: ~
#
# The following example enriches each event with host metadata.
#
#processors:
#- add_host_metadata:
# netinfo.enabled: false
#
# The following example enriches each event with process metadata using
# process IDs included in the event.
#
#processors:
#- add_process_metadata:
# match_pids: ["system.process.ppid"]
# target: system.process.parent
#
# The following example decodes fields containing JSON strings
# and replaces the strings with valid JSON objects.
#
#processors:
#- decode_json_fields:
# fields: ["field1", "field2", ...]
# process_array: false
# max_depth: 1
# target: ""
# overwrite_keys: false
#============================= Elastic Cloud ==================================
@ -321,6 +355,9 @@ output.elasticsearch:
# Set gzip compression level.
#compression_level: 0
# Configure escaping html symbols in strings.
#escape_html: true
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "elastic"
@ -426,6 +463,9 @@ output.elasticsearch:
# Set gzip compression level.
#compression_level: 3
# Configure escaping html symbols in strings.
#escape_html: true
# Optional maximum time to live for a connection to Logstash, after which the
# connection will be re-established. A value of `0s` (the default) will
# disable this feature.
@ -502,6 +542,21 @@ output.elasticsearch:
# never, once, and freely. Default is never.
#ssl.renegotiation: never
# The number of times to retry publishing an event after a publishing failure.
# After the specified number of retries, the events are typically dropped.
# Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting
# and retry until all events are published. Set max_retries to a value less
# than 0 to retry until all events are published. The default is 3.
#max_retries: 3
# The maximum number of events to bulk in a single Logstash request. The
# default is 2048.
#bulk_max_size: 2048
# The number of seconds to wait for responses from the Logstash server before
# timing out. The default is 30s.
#timeout: 30s
#------------------------------- Kafka output ----------------------------------
#output.kafka:
# Boolean flag to enable or disable the output module.
@ -537,9 +592,16 @@ output.elasticsearch:
#username: ''
#password: ''
# Kafka version auditbeat is assumed to run against. Defaults to the oldest
# supported stable version (currently version 0.8.2.0)
#version: 0.8.2
# Kafka version auditbeat is assumed to run against. Defaults to the "1.0.0".
#version: '1.0.0'
# Configure JSON encoding
#codec.json:
# Pretty print json event
#pretty: false
# Configure escaping html symbols in strings.
#escape_html: true
# Metadata update configuration. Metadata do contain leader information
# deciding which broker to use when publishing.
@ -587,6 +649,10 @@ output.elasticsearch:
# default is gzip.
#compression: gzip
# Set the compression level. Currently only gzip provides a compression level
# between 0 and 9. The default value is chosen by the compression algorithm.
#compression_level: 4
# The maximum permitted size of JSON-encoded messages. Bigger messages will be
# dropped. The default value is 1000000 (bytes). This value should be equal to
# or less than the broker's message.max.bytes.
@ -643,6 +709,14 @@ output.elasticsearch:
# Boolean flag to enable or disable the output module.
#enabled: true
# Configure JSON encoding
#codec.json:
# Pretty print json event
#pretty: false
# Configure escaping html symbols in strings.
#escape_html: true
# The list of Redis servers to connect to. If load balancing is enabled, the
# events are distributed to the servers in the list. If one server becomes
# unreachable, the events are distributed to the reachable servers only.
@ -690,6 +764,17 @@ output.elasticsearch:
# until all events are published. The default is 3.
#max_retries: 3
# The number of seconds to wait before trying to reconnect to Redis
# after a network error. After waiting backoff.init seconds, the Beat
# tries to reconnect. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful connection, the backoff
# timer is reset. The default is 1s.
#backoff.init: 1s
# The maximum number of seconds to wait before attempting to connect to
# Redis after a network error. The default is 60s.
#backoff.max: 60s
# The maximum number of events to bulk in a single Redis request or pipeline.
# The default is 2048.
#bulk_max_size: 2048
@ -744,6 +829,14 @@ output.elasticsearch:
# Boolean flag to enable or disable the output module.
#enabled: true
# Configure JSON encoding
#codec.json:
# Pretty print json event
#pretty: false
# Configure escaping html symbols in strings.
#escape_html: true
# Path to the directory where to save the generated files. The option is
# mandatory.
#path: "/tmp/auditbeat"
@ -771,8 +864,13 @@ output.elasticsearch:
# Boolean flag to enable or disable the output module.
#enabled: true
# Pretty print json event
#pretty: false
# Configure JSON encoding
#codec.json:
# Pretty print json event
#pretty: false
# Configure escaping html symbols in strings.
#escape_html: true
#================================= Paths ======================================
@ -870,6 +968,22 @@ output.elasticsearch:
# Path to fields.yml file to generate the template
#setup.template.fields: "${path.config}/fields.yml"
# A list of fields to be added to the template and Kibana index pattern. Also
# specify setup.template.overwrite: true to overwrite the existing template.
# This setting is experimental.
#setup.template.append_fields:
#- name: field_name
# type: field_type
# Enable json template loading. If this is enabled, the fields.yml is ignored.
#setup.template.json.enabled: false
# Path to the json template file
#setup.template.json.path: "${path.config}/template.json"
# Name under which the template is stored in Elasticsearch
#setup.template.json.name: ""
# Overwrite existing template
#setup.template.overwrite: false
@ -993,6 +1107,13 @@ logging.files:
# Must be a valid Unix-style file permissions mask expressed in octal notation.
#permissions: 0600
# Enable log file rotation on time intervals in addition to size-based rotation.
# Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
# are boundary-aligned with minutes, hours, days, weeks, months, and years as
# reported by the local system clock. All other intervals are calculated from the
# unix epoch. Defaults to disabled.
#interval: 0
# Set to true to log messages in json format.
#logging.json: false
@ -1047,6 +1168,17 @@ logging.files:
# The default is 50.
#bulk_max_size: 50
# The number of seconds to wait before trying to reconnect to Elasticsearch
# after a network error. After waiting backoff.init seconds, the Beat
# tries to reconnect. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful connection, the backoff
# timer is reset. The default is 1s.
#backoff.init: 1s
# The maximum number of seconds to wait before attempting to connect to
# Elasticsearch after a network error. The default is 60s.
#backoff.max: 60s
# Configure http request timeout before failing an request to Elasticsearch.
#timeout: 90
@ -1086,6 +1218,9 @@ logging.files:
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#metrics.period: 10s
#state.period: 1m
#================================ HTTP Endpoint ======================================
# Each beat can expose internal metrics through a HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
@ -1100,3 +1235,8 @@ logging.files:
# Port on which the HTTP endpoint will bind. Default is 5066.
#http.port: 5066
#============================= Process Security ================================
# Enable or disable seccomp system call filtering on Linux. Default is enabled.
#seccomp.enabled: true

View File

@ -11,6 +11,8 @@
auditbeat.modules:
- module: auditd
# Load audit rules from separate files. Same format as audit.rules(7).
audit_rule_files: [ '${path.config}/audit.rules.d/*.conf' ]
audit_rules: |
## Define audit rules here.
## Create file watches (-w) or syscall audits (-a or -A). Uncomment these
@ -93,6 +95,11 @@ setup.kibana:
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
#host: "localhost:5601"
# Kibana Space ID
# ID of the Kibana Space into which the dashboards should be loaded. By default,
# the Default Space will be used.
#space.id:
#============================= Elastic Cloud ==================================
# These settings simplify using auditbeat with the Elastic Cloud (https://cloud.elastic.co/).
@ -135,6 +142,14 @@ output.elasticsearch:
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
#================================ Procesors =====================================
# Configure processors to enhance or manipulate events generated by the beat.
processors:
- add_host_metadata: ~
- add_cloud_metadata: ~
#================================ Logging =====================================
# Sets log level. The default log level is info.

View File

@ -1,12 +1,29 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package cmd
import (
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/elastic/beats/metricbeat/beater"
"github.com/elastic/beats/auditbeat/core"
cmd "github.com/elastic/beats/libbeat/cmd"
"github.com/elastic/beats/libbeat/cmd"
"github.com/elastic/beats/metricbeat/beater"
"github.com/elastic/beats/metricbeat/mb/module"
)
@ -16,6 +33,12 @@ const Name = "auditbeat"
// RootCmd for running auditbeat.
var RootCmd *cmd.BeatsRootCmd
// ShowCmd to display extra information.
var ShowCmd = &cobra.Command{
Use: "show",
Short: "Show modules information",
}
func init() {
create := beater.Creator(
beater.WithModuleOptions(
@ -24,4 +47,5 @@ func init() {
)
var runFlags = pflag.NewFlagSet(Name, pflag.ExitOnError)
RootCmd = cmd.GenRootCmdWithRunFlags(Name, "", create, runFlags)
RootCmd.AddCommand(ShowCmd)
}

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package core
import (

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package datastore
import (
@ -5,7 +22,7 @@ import (
"os"
"sync"
"github.com/boltdb/bolt"
bolt "github.com/coreos/bbolt"
"github.com/elastic/beats/libbeat/paths"
)

View File

@ -13,6 +13,11 @@ services:
volumes:
- ${PWD}/..:/go/src/github.com/elastic/beats/
command: make
privileged: true
pid: host
cap_add:
- AUDIT_CONTROL
- AUDIT_READ
# This is a proxy used to block beats until all services are healthy.
# See: https://github.com/docker/compose/issues/4369

View File

@ -35,6 +35,7 @@ The following topics describe how to configure {beatname_uc}:
* <<using-environ-vars>>
* <<yaml-tips>>
* <<regexp-support>>
* <<http-endpoint>>
* <<{beatname_lc}-reference-yml>>
After changing configuration settings, you need to restart {beatname_uc} to
@ -48,7 +49,6 @@ include::./auditbeat-general-options.asciidoc[]
include::./reload-configuration.asciidoc[]
:allplatforms:
include::../../libbeat/docs/queueconfig.asciidoc[]
include::../../libbeat/docs/outputconfig.asciidoc[]
@ -69,11 +69,14 @@ include::../../libbeat/docs/loggingconfig.asciidoc[]
:standalone:
include::../../libbeat/docs/shared-env-vars.asciidoc[]
:standalone!:
:standalone:
:allplatforms:
include::../../libbeat/docs/yaml.asciidoc[]
:standalone!:
include::../../libbeat/docs/regexp.asciidoc[]
include::../../libbeat/docs/http-endpoint.asciidoc[]
include::../../libbeat/docs/reference-yml.asciidoc[]

View File

@ -3192,6 +3192,15 @@ SHA512/256 hash of the file.
--
*`hash.xxh64`*::
+
--
type: keyword
XX64 hash of the file.
--
[[exported-fields-host-processor]]
== Host fields
@ -3258,6 +3267,26 @@ type: keyword
OS family (e.g. redhat, debian, freebsd, windows).
--
*`host.ip`*::
+
--
type: ip
List of IP-addresses.
--
*`host.mac`*::
+
--
type: keyword
List of hardware-addresses, usually MAC-addresses.
--
[[exported-fields-kubernetes-processor]]
@ -3276,6 +3305,16 @@ type: keyword
Kubernetes pod name
--
*`kubernetes.pod.uid`*::
+
--
type: keyword
Kubernetes Pod UID
--
*`kubernetes.namespace`*::

View File

@ -75,8 +75,8 @@ tar xzvf {beatname_lc}-{version}-darwin-x86_64.tar.gz
endif::[]
[[docker]]
*docker:*
[[linux]]
*linux:*
ifeval::["{release-state}"=="unreleased"]
@ -86,13 +86,19 @@ endif::[]
ifeval::["{release-state}"!="unreleased"]
["source", "shell", subs="attributes"]
["source","sh",subs="attributes"]
------------------------------------------------
docker pull {dockerimage}
curl -L -O https://artifacts.elastic.co/downloads/beats/{beatname_lc}/{beatname_lc}-{version}-linux-x86_64.tar.gz
tar xzvf {beatname_lc}-{version}-linux-x86_64.tar.gz
------------------------------------------------
endif::[]
[[docker]]
*docker:*
See <<running-on-docker, Running on Docker>> for deploying Docker containers.
[[win]]
*win:*
@ -168,17 +174,7 @@ If you accept the default configuration without specifying additional modules,
+
See <<configuring-howto-{beatname_lc}>> for more details about configuring modules.
. If you are sending output to Elasticsearch (and not using Logstash), set the
IP address and port where {beatname_uc} can find the Elasticsearch installation:
+
[source,yaml]
----------------------------------------------------------------------
output.elasticsearch:
hosts: ["127.0.0.1:9200"]
----------------------------------------------------------------------
+
If you are sending output to Logstash, make sure you
<<logstash-output,Configure the Logstash output>> instead.
include::../../libbeat/docs/step-configure-output.asciidoc[]
include::../../libbeat/docs/step-configure-kibana-endpoint.asciidoc[]
@ -189,52 +185,43 @@ include::../../libbeat/docs/step-test-config.asciidoc[]
include::../../libbeat/docs/step-look-at-config.asciidoc[]
[id="{beatname_lc}-template"]
=== Step 3: Load the index template in Elasticsearch
=== Step 3: Load the index template in {es}
:allplatforms:
include::../../libbeat/docs/shared-template-load.asciidoc[]
[[load-kibana-dashboards]]
=== Step 4: Set up the Kibana dashboards
=== Step 4: Set up the {kib} dashboards
:allplatforms:
include::../../libbeat/docs/dashboards.asciidoc[]
[id="{beatname_lc}-starting"]
=== Step 5: Start {beatname_uc}
Run {beatname_uc} by issuing the appropriate command for your platform. If you
are accessing a secured Elasticsearch cluster, make sure you've configured
credentials as described in <<{beatname_lc}-configuration>>.
are accessing a secured {es} cluster, make sure you've configured credentials as
described in <<{beatname_lc}-configuration>>.
NOTE: If you use an init.d script to start {beatname_uc} on deb or rpm, you can't
specify command line flags (see <<command-line-options>>). To specify flags,
start {beatname_uc} in the foreground.
*deb:*
*deb and rpm:*
["source","sh",subs="attributes"]
----------------------------------------------------------------------
sudo service {beatname_lc} start
----------------------------------------------------------------------
*rpm:*
["source","sh",subs="attributes"]
----------------------------------------------------------------------
sudo service {beatname_lc} start
----------------------------------------------------------------------
*mac:*
*mac and linux:*
["source","sh",subs="attributes"]
----------------------------------------------------------------------
sudo chown root {beatname_lc}.yml <1>
sudo ./{beatname_lc} -e -c {beatname_lc}.yml -d "publish"
sudo ./{beatname_lc} -e
----------------------------------------------------------------------
<1> To monitor system files, you'll be running {beatname_uc} as root, so you
need to change ownership of the configuration file, or run {beatname_uc} with
`-strict.perms=false` specified. See
`--strict.perms=false` specified. See
{libbeat}/config-file-permissions.html[Config File Ownership and Permissions]
in the _Beats Platform Reference_.
@ -252,22 +239,22 @@ By default the log files are stored in +C:{backslash}ProgramData{backslash}{beat
==== Test the {beatname_uc} installation
To verify that your server's statistics are present in Elasticsearch, issue
the following command:
To verify that your server's statistics are present in {es}, issue the following
command:
["source","sh",subs="attributes"]
----------------------------------------------------------------------
curl -XGET 'http://localhost:9200/{beatname_lc}-*/_search?pretty'
----------------------------------------------------------------------
Make sure that you replace `localhost:9200` with the address of your
Elasticsearch instance.
Make sure that you replace `localhost:9200` with the address of your {es}
instance.
On Windows, if you don't have cURL installed, simply point your browser to the
URL.
[[view-kibana-dashboards]]
=== Step 6: View the sample Kibana dashboards
=== Step 6: View the sample {kib} dashboards
To make it easier for you to start auditing the activities of users and
processes on your system, we have created example {beatname_uc} dashboards.
@ -278,4 +265,5 @@ include::../../libbeat/docs/opendashboards.asciidoc[]
The dashboards are provided as examples. We recommend that you
{kibana-ref}/dashboard.html[customize] them to meet your needs.
[role="screenshot"]
image:./images/auditbeat-file-integrity-dashboard.png[Auditbeat File Integrity Dashboard]

View File

@ -12,6 +12,12 @@ include::{asciidoc-dir}/../../shared/attributes.asciidoc[]
:discuss_forum: beats/{beatname_lc}
:beat_default_index_prefix: {beatname_lc}
:has_ml_jobs: yes
:deb_os:
:rpm_os:
:mac_os:
:docker_platform:
:win_os:
:linux_os:
include::../../libbeat/docs/shared-beats-attributes.asciidoc[]
@ -33,9 +39,7 @@ include::./fields.asciidoc[]
include::../../libbeat/docs/monitoring/monitoring-beats.asciidoc[]
include::./securing-auditbeat.asciidoc[]
include::../../libbeat/docs/security/securing-beats.asciidoc[]
include::../../libbeat/docs/shared-securing-beat.asciidoc[]
include::./troubleshooting.asciidoc[]

View File

@ -67,6 +67,55 @@ from listening to audit messages:
systemctl mask systemd-journald-audit.socket
-----
[float]
=== Inspect the kernel audit system status
{beatname_uc} provides useful commands to query the state of the audit system
in the Linux kernel.
* See the list of installed audit rules:
+
[source,shell]
-----
auditbeat show auditd-rules
-----
+
Prints the list of loaded rules, similar to `auditctl -l`:
+
[source,shell]
-----
-a never,exit -S all -F pid=26253
-a always,exit -F arch=b32 -S all -F key=32bit-abi
-a always,exit -F arch=b64 -S execve,execveat -F key=exec
-a always,exit -F arch=b64 -S connect,accept,bind -F key=external-access
-w /etc/group -p wa -k identity
-w /etc/passwd -p wa -k identity
-w /etc/gshadow -p wa -k identity
-a always,exit -F arch=b64 -S open,truncate,ftruncate,creat,openat,open_by_handle_at -F exit=-EACCES -F key=access
-a always,exit -F arch=b64 -S open,truncate,ftruncate,creat,openat,open_by_handle_at -F exit=-EPERM -F key=access
-----
* See the status of the audit system:
+
[source,shell]
-----
auditbeat show auditd-status
-----
+
Prints the status of the kernel audit system, similar to `auditctl -s`:
+
[source,shell]
-----
enabled 1
failure 0
pid 0
rate_limit 0
backlog_limit 8192
lost 14407
backlog 0
backlog_wait_time 0
features 0xf
-----
[float]
=== Configuration options
@ -79,10 +128,11 @@ following example shows all configuration options with their default values.
- module: auditd
resolve_ids: true
failure_mode: silent
backlog_limit: 8196
backlog_limit: 8192
rate_limit: 0
include_raw_message: false
include_warnings: false
backpressure_strategy: auto
----
*`socket_type`*:: This optional setting controls the type of
@ -141,6 +191,33 @@ embedded in the string using `#` as a prefix. The format for rules is the same
used by the Linux `auditctl` utility. {beatname_uc} supports adding file watches
(`-w`) and syscall rules (`-a` or `-A`).
*`audit_rule_files`*:: A list of files to load audit rules from. This files are
loaded after the rules declared in `audit_rules` are loaded. Wildcards are
supported and will expand in lexicographical order. The format is the same as
that of the `audit_rules` field.
*`backpressure_strategy`*:: Specifies the strategy that {beatname_uc} uses to
prevent backpressure from propagating to the kernel and impacting audited
processes.
+
--
The possible values are:
- `auto` (default): {beatname_uc} uses the `kernel` strategy, if supported, or
falls back to the `userspace` strategy.
- `kernel`: {beatname_uc} sets the `backlog_wait_time` in the kernel's
audit framework to 0. This causes events to be discarded in the kernel if
the audit backlog queue fills to capacity. Requires a 3.14 kernel or
newer.
- `userspace`: {beatname_uc} drops events when there is backpressure
from the publishing pipeline. If no `rate_limit` is set, {beatname_uc} sets a rate
limit of 5000. Users should test their setup and adjust the `rate_limit`
option accordingly.
- `both`: {beatname_uc} uses the `kernel` and `userspace` strategies at the same
time.
- `none`: No backpressure mitigation measures are enabled.
--
[float]
=== Audit rules
@ -193,6 +270,8 @@ is an example configuration:
----
auditbeat.modules:
- module: auditd
# Load audit rules from separate files. Same format as audit.rules(7).
audit_rule_files: [ '${path.config}/audit.rules.d/*.conf' ]
audit_rules: |
## Define audit rules here.
## Create file watches (-w) or syscall audits (-a or -A). Uncomment these

View File

@ -101,7 +101,7 @@ a suffix to the value. The supported units are `b` (default), `kib`, `kb`, `mib`
*`hash_types`*:: A list of hash types to compute when the file changes.
The supported hash types are `blake2b_256`, `blake2b_384`, `blake2b_512`, `md5`,
`sha1`, `sha224`, `sha256`, `sha384`, `sha512`, `sha512_224`, `sha512_256`,
`sha3_224`, `sha3_256`, `sha3_384`, and `sha3_512`. The default value is `sha1`.
`sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, and `xxh64`. The default value is `sha1`.
*`recursive`*:: By default, the watches set to the paths specified in
`paths` are not recursive. This means that only changes to the contents

View File

@ -1,6 +1,5 @@
include::../../libbeat/docs/shared-docker.asciidoc[]
[float]
==== Special requirements
Under Docker, {beatname_uc} runs as a non-root user, but requires some privileged

View File

@ -1,28 +0,0 @@
[id="securing-{beatname_lc}"]
= Securing {beatname_uc}
[partintro]
--
The following topics describe how to secure communication between {beatname_uc}
and other products in the Elastic stack:
* <<securing-communication-elasticsearch>>
* <<configuring-ssl-logstash>>
* <<securing-beats>>
//sets block macro for https.asciidoc included in next section
--
[[securing-communication-elasticsearch]]
== Secure communication with Elasticsearch
include::../../libbeat/docs/https.asciidoc[]
//sets block macro for shared-ssl-logstash-config.asciidoc included in next section
[[configuring-ssl-logstash]]
== Secure communication with Logstash by using SSL
include::../../libbeat/docs/shared-ssl-logstash-config.asciidoc[]

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,25 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package include
import (
// Include all Auditbeat modules so that they register their
// factories with the global registry.
_ "github.com/elastic/beats/auditbeat/module/auditd"
_ "github.com/elastic/beats/auditbeat/module/file_integrity"
)

271
vendor/github.com/elastic/beats/auditbeat/magefile.go generated vendored Normal file
View File

@ -0,0 +1,271 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// +build mage
package main
import (
"context"
"fmt"
"regexp"
"time"
"github.com/magefile/mage/mg"
"github.com/magefile/mage/sh"
"github.com/pkg/errors"
"github.com/elastic/beats/dev-tools/mage"
)
func init() {
mage.BeatDescription = "Audit the activities of users and processes on your system."
}
// Build builds the Beat binary.
func Build() error {
return mage.Build(mage.DefaultBuildArgs())
}
// GolangCrossBuild build the Beat binary inside of the golang-builder.
// Do not use directly, use crossBuild instead.
func GolangCrossBuild() error {
return mage.GolangCrossBuild(mage.DefaultGolangCrossBuildArgs())
}
// BuildGoDaemon builds the go-daemon binary (use crossBuildGoDaemon).
func BuildGoDaemon() error {
return mage.BuildGoDaemon()
}
// CrossBuild cross-builds the beat for all target platforms.
func CrossBuild() error {
return mage.CrossBuild()
}
// CrossBuildXPack cross-builds the beat with XPack for all target platforms.
func CrossBuildXPack() error {
return mage.CrossBuildXPack()
}
// CrossBuildGoDaemon cross-builds the go-daemon binary using Docker.
func CrossBuildGoDaemon() error {
return mage.CrossBuildGoDaemon()
}
// Clean cleans all generated files and build artifacts.
func Clean() error {
return mage.Clean()
}
// Package packages the Beat for distribution.
// Use SNAPSHOT=true to build snapshots.
// Use PLATFORMS to control the target platforms.
func Package() {
start := time.Now()
defer func() { fmt.Println("package ran for", time.Since(start)) }()
mage.UseElasticBeatPackaging()
customizePackaging()
mg.Deps(Update)
mg.Deps(makeConfigTemplates, CrossBuild, CrossBuildXPack, CrossBuildGoDaemon)
mg.SerialDeps(mage.Package, TestPackages)
}
// TestPackages tests the generated packages (i.e. file modes, owners, groups).
func TestPackages() error {
return mage.TestPackages()
}
// Update updates the generated files (aka make update).
func Update() error {
return sh.Run("make", "update")
}
// Fields generates a fields.yml for the Beat.
func Fields() error {
return mage.GenerateFieldsYAML("module")
}
// GoTestUnit executes the Go unit tests.
// Use TEST_COVERAGE=true to enable code coverage profiling.
// Use RACE_DETECTOR=true to enable the race detector.
func GoTestUnit(ctx context.Context) error {
return mage.GoTest(ctx, mage.DefaultGoTestUnitArgs())
}
// GoTestIntegration executes the Go integration tests.
// Use TEST_COVERAGE=true to enable code coverage profiling.
// Use RACE_DETECTOR=true to enable the race detector.
func GoTestIntegration(ctx context.Context) error {
return mage.GoTest(ctx, mage.DefaultGoTestIntegrationArgs())
}
// -----------------------------------------------------------------------------
// Customizations specific to Auditbeat.
// - Config files are Go templates.
const (
configTemplateGlob = "module/*/_meta/config*.yml.tmpl"
shortConfigTemplate = "build/auditbeat.yml.tmpl"
referenceConfigTemplate = "build/auditbeat.reference.yml.tmpl"
)
func makeConfigTemplates() error {
configFiles, err := mage.FindFiles(configTemplateGlob)
if err != nil {
return errors.Wrap(err, "failed to find config templates")
}
var shortIn []string
shortIn = append(shortIn, "_meta/common.p1.yml")
shortIn = append(shortIn, configFiles...)
shortIn = append(shortIn, "_meta/common.p2.yml")
shortIn = append(shortIn, "../libbeat/_meta/config.yml")
if !mage.IsUpToDate(shortConfigTemplate, shortIn...) {
fmt.Println(">> Building", shortConfigTemplate)
mage.MustFileConcat(shortConfigTemplate, 0600, shortIn...)
mage.MustFindReplace(shortConfigTemplate, regexp.MustCompile("beatname"), "{{.BeatName}}")
mage.MustFindReplace(shortConfigTemplate, regexp.MustCompile("beat-index-prefix"), "{{.BeatIndexPrefix}}")
}
var referenceIn []string
referenceIn = append(referenceIn, "_meta/common.reference.yml")
referenceIn = append(referenceIn, configFiles...)
referenceIn = append(referenceIn, "../libbeat/_meta/config.reference.yml")
if !mage.IsUpToDate(referenceConfigTemplate, referenceIn...) {
fmt.Println(">> Building", referenceConfigTemplate)
mage.MustFileConcat(referenceConfigTemplate, 0644, referenceIn...)
mage.MustFindReplace(referenceConfigTemplate, regexp.MustCompile("beatname"), "{{.BeatName}}")
mage.MustFindReplace(referenceConfigTemplate, regexp.MustCompile("beat-index-prefix"), "{{.BeatIndexPrefix}}")
}
return nil
}
// customizePackaging modifies the package specs to use templated config files
// instead of the defaults.
//
// Customizations specific to Auditbeat:
// - Include audit.rules.d directory in packages.
func customizePackaging() {
var (
shortConfig = mage.PackageFile{
Mode: 0600,
Source: "{{.PackageDir}}/auditbeat.yml",
Dep: generateShortConfig,
Config: true,
}
referenceConfig = mage.PackageFile{
Mode: 0644,
Source: "{{.PackageDir}}/auditbeat.reference.yml",
Dep: generateReferenceConfig,
}
)
archiveRulesDir := "audit.rules.d"
linuxPkgRulesDir := "/etc/{{.BeatName}}/audit.rules.d"
rulesSrcDir := "module/auditd/_meta/audit.rules.d"
sampleRules := mage.PackageFile{
Mode: 0644,
Source: rulesSrcDir,
Dep: func(spec mage.PackageSpec) error {
if spec.OS == "linux" {
params := map[string]interface{}{
"ArchBits": archBits,
}
rulesFile := spec.MustExpand(rulesSrcDir+"/sample-rules-linux-{{call .ArchBits .GOARCH}}bit.conf", params)
if err := mage.Copy(rulesFile, spec.MustExpand("{{.PackageDir}}/audit.rules.d/sample-rules.conf.disabled")); err != nil {
return errors.Wrap(err, "failed to copy sample rules")
}
}
return nil
},
}
for _, args := range mage.Packages {
pkgType := args.Types[0]
switch pkgType {
case mage.TarGz, mage.Zip:
args.Spec.ReplaceFile("{{.BeatName}}.yml", shortConfig)
args.Spec.ReplaceFile("{{.BeatName}}.reference.yml", referenceConfig)
case mage.Deb, mage.RPM, mage.DMG:
args.Spec.ReplaceFile("/etc/{{.BeatName}}/{{.BeatName}}.yml", shortConfig)
args.Spec.ReplaceFile("/etc/{{.BeatName}}/{{.BeatName}}.reference.yml", referenceConfig)
default:
panic(errors.Errorf("unhandled package type: %v", pkgType))
}
if args.OS == "linux" {
rulesDest := archiveRulesDir
if pkgType != mage.TarGz {
rulesDest = linuxPkgRulesDir
}
args.Spec.Files[rulesDest] = sampleRules
}
}
}
func generateReferenceConfig(spec mage.PackageSpec) error {
params := map[string]interface{}{
"Reference": true,
"ArchBits": archBits,
}
return spec.ExpandFile(referenceConfigTemplate,
"{{.PackageDir}}/auditbeat.reference.yml", params)
}
func generateShortConfig(spec mage.PackageSpec) error {
params := map[string]interface{}{
"Reference": false,
"ArchBits": archBits,
}
return spec.ExpandFile(shortConfigTemplate,
"{{.PackageDir}}/auditbeat.yml", params)
}
// archBits returns the number of bit width of the GOARCH architecture value.
// This function is used by the auditd module configuration templates to
// generate architecture specific audit rules.
func archBits(goarch string) int {
switch goarch {
case "386", "arm":
return 32
default:
return 64
}
}
// Configs generates the auditbeat.yml and auditbeat.reference.yml config files.
// Set DEV_OS and DEV_ARCH to change the target host for the generated configs.
// Defaults to linux/amd64.
func Configs() {
mg.Deps(makeConfigTemplates)
params := map[string]interface{}{
"GOOS": mage.EnvOr("DEV_OS", "linux"),
"GOARCH": mage.EnvOr("DEV_ARCH", "amd64"),
"ArchBits": archBits,
"Reference": false,
}
fmt.Printf(">> Building auditbeat.yml for %v/%v\n", params["GOOS"], params["GOARCH"])
mage.MustExpandFile(shortConfigTemplate, "auditbeat.yml", params)
params["Reference"] = true
fmt.Printf(">> Building auditbeat.reference.yml for %v/%v\n", params["GOOS"], params["GOARCH"])
mage.MustExpandFile(referenceConfigTemplate, "auditbeat.reference.yml", params)
}

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package main
import (
@ -8,6 +25,9 @@ import (
// Register modules.
_ "github.com/elastic/beats/auditbeat/module/auditd"
_ "github.com/elastic/beats/auditbeat/module/file_integrity"
// Register includes.
_ "github.com/elastic/beats/auditbeat/include"
)
func main() {

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package main
// This file is mandatory as otherwise the auditbeat.test binary is not generated correctly.

11
vendor/github.com/elastic/beats/auditbeat/make.bat generated vendored Normal file
View File

@ -0,0 +1,11 @@
@echo off
REM Windows wrapper for Mage (https://magefile.org/) that installs it
REM to %GOPATH%\bin from the Beats vendor directory.
REM
REM After running this once you may invoke mage.exe directly.
REM WHERE sets ERRORLEVEL to non-zero when mage is not found on PATH,
REM in which case it is installed from the vendored copy below.
WHERE mage
IF %ERRORLEVEL% NEQ 0 go install github.com/elastic/beats/vendor/github.com/magefile/mage
REM Forward all command-line arguments to mage.
mage %*

View File

@ -0,0 +1,14 @@
## Sample auditd rules for 32-bit (arch=b32) hosts. Same format as
## audit.rules(7): -a appends a syscall rule, -w adds a file watch.
## Executions.
-a always,exit -F arch=b32 -S execve,execveat -k exec
## External access (warning: these can be expensive to audit).
-a always,exit -F arch=b32 -S accept4,bind,connect -F key=external-access
## Identity changes.
-w /etc/group -p wa -k identity
-w /etc/passwd -p wa -k identity
-w /etc/gshadow -p wa -k identity
## Unauthorized access attempts.
-a always,exit -F arch=b32 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access
-a always,exit -F arch=b32 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access

View File

@ -0,0 +1,20 @@
## If you are on a 64 bit platform, everything should be running
## in 64 bit mode. This rule will detect any use of the 32 bit syscalls
## because this might be a sign of someone exploiting a hole in the 32
## bit API.
-a always,exit -F arch=b32 -S all -F key=32bit-abi
## Executions.
-a always,exit -F arch=b64 -S execve,execveat -k exec
## External access (warning: these can be expensive to audit).
-a always,exit -F arch=b64 -S accept,bind,connect -F key=external-access
## Identity changes.
-w /etc/group -p wa -k identity
-w /etc/passwd -p wa -k identity
-w /etc/gshadow -p wa -k identity
## Unauthorized access attempts.
-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access
-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access

View File

@ -1,10 +1,10 @@
{{ if eq .goos "linux" -}}
{{ if .reference -}}
{{ if eq .GOOS "linux" -}}
{{ if .Reference -}}
# The auditd module collects events from the audit framework in the Linux
# kernel. You need to specify audit rules for the events that you want to audit.
{{ end -}}
- module: auditd
{{ if .reference -}}
{{ if .Reference -}}
resolve_ids: true
failure_mode: silent
backlog_limit: 8196
@ -12,12 +12,14 @@
include_raw_message: false
include_warnings: false
{{ end -}}
# Load audit rules from separate files. Same format as audit.rules(7).
audit_rule_files: [ '${path.config}/audit.rules.d/*.conf' ]
audit_rules: |
## Define audit rules here.
## Create file watches (-w) or syscall audits (-a or -A). Uncomment these
## examples or add your own rules.
{{ if eq .goarch "amd64" -}}
{{ if eq .GOARCH "amd64" -}}
## If you are on a 64 bit platform, everything should be running
## in 64 bit mode. This rule will detect any use of the 32 bit syscalls
## because this might be a sign of someone exploiting a hole in the 32
@ -26,10 +28,10 @@
{{ end -}}
## Executions.
#-a always,exit -F arch=b{{.arch_bits}} -S execve,execveat -k exec
#-a always,exit -F arch=b{{call .ArchBits .GOARCH}} -S execve,execveat -k exec
## External access (warning: these can be expensive to audit).
#-a always,exit -F arch=b{{.arch_bits}} -S accept,bind,connect -F key=external-access
#-a always,exit -F arch=b{{call .ArchBits .GOARCH}} -S accept,bind,connect -F key=external-access
## Identity changes.
#-w /etc/group -p wa -k identity
@ -37,6 +39,6 @@
#-w /etc/gshadow -p wa -k identity
## Unauthorized access attempts.
#-a always,exit -F arch=b{{.arch_bits}} -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access
#-a always,exit -F arch=b{{.arch_bits}} -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access
#-a always,exit -F arch=b{{call .ArchBits .GOARCH}} -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access
#-a always,exit -F arch=b{{call .ArchBits .GOARCH}} -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access
{{ end -}}

View File

@ -32,10 +32,16 @@
"module": "auditd",
"type": "user_login"
},
"network": {
"direction": "incoming"
},
"process": {
"exe": "/usr/sbin/sshd",
"pid": "12635"
},
"source": {
"ip": "179.38.151.221"
},
"user": {
"auid": "unset",
"name_map": {

View File

@ -62,6 +62,55 @@ from listening to audit messages:
systemctl mask systemd-journald-audit.socket
-----
[float]
=== Inspect the kernel audit system status
{beatname_uc} provides useful commands to query the state of the audit system
in the Linux kernel.
* See the list of installed audit rules:
+
[source,shell]
-----
auditbeat show auditd-rules
-----
+
Prints the list of loaded rules, similar to `auditctl -l`:
+
[source,shell]
-----
-a never,exit -S all -F pid=26253
-a always,exit -F arch=b32 -S all -F key=32bit-abi
-a always,exit -F arch=b64 -S execve,execveat -F key=exec
-a always,exit -F arch=b64 -S connect,accept,bind -F key=external-access
-w /etc/group -p wa -k identity
-w /etc/passwd -p wa -k identity
-w /etc/gshadow -p wa -k identity
-a always,exit -F arch=b64 -S open,truncate,ftruncate,creat,openat,open_by_handle_at -F exit=-EACCES -F key=access
-a always,exit -F arch=b64 -S open,truncate,ftruncate,creat,openat,open_by_handle_at -F exit=-EPERM -F key=access
-----
* See the status of the audit system:
+
[source,shell]
-----
auditbeat show auditd-status
-----
+
Prints the status of the kernel audit system, similar to `auditctl -s`:
+
[source,shell]
-----
enabled 1
failure 0
pid 0
rate_limit 0
backlog_limit 8192
lost 14407
backlog 0
backlog_wait_time 0
features 0xf
-----
[float]
=== Configuration options
@ -74,10 +123,11 @@ following example shows all configuration options with their default values.
- module: auditd
resolve_ids: true
failure_mode: silent
backlog_limit: 8196
backlog_limit: 8192
rate_limit: 0
include_raw_message: false
include_warnings: false
backpressure_strategy: auto
----
*`socket_type`*:: This optional setting controls the type of
@ -136,6 +186,33 @@ embedded in the string using `#` as a prefix. The format for rules is the same
used by the Linux `auditctl` utility. {beatname_uc} supports adding file watches
(`-w`) and syscall rules (`-a` or `-A`).
*`audit_rule_files`*:: A list of files to load audit rules from. These files are
loaded after the rules declared in `audit_rules` are loaded. Wildcards are
supported and will expand in lexicographical order. The format is the same as
that of the `audit_rules` field.
*`backpressure_strategy`*:: Specifies the strategy that {beatname_uc} uses to
prevent backpressure from propagating to the kernel and impacting audited
processes.
+
--
The possible values are:
- `auto` (default): {beatname_uc} uses the `kernel` strategy, if supported, or
falls back to the `userspace` strategy.
- `kernel`: {beatname_uc} sets the `backlog_wait_time` in the kernel's
audit framework to 0. This causes events to be discarded in the kernel if
the audit backlog queue fills to capacity. Requires a 3.14 kernel or
newer.
- `userspace`: {beatname_uc} drops events when there is backpressure
from the publishing pipeline. If no `rate_limit` is set, {beatname_uc} sets a rate
limit of 5000. Users should test their setup and adjust the `rate_limit`
option accordingly.
- `both`: {beatname_uc} uses the `kernel` and `userspace` strategies at the same
time.
- `none`: No backpressure mitigation measures are enabled.
--
[float]
=== Audit rules

View File

@ -40,8 +40,8 @@
"session": "11",
"summary": {
"actor": {
"primary": "1001",
"secondary": "1001"
"primary": "ubuntu",
"secondary": "ubuntu"
},
"how": "/bin/uname",
"object": {
@ -88,6 +88,13 @@
"fsgid": "1002",
"fsuid": "1001",
"gid": "1002",
"name_map": {
"auid": "ubuntu",
"euid": "ubuntu",
"fsuid": "ubuntu",
"suid": "ubuntu",
"uid": "ubuntu"
},
"sgid": "1002",
"suid": "1001",
"uid": "1001"

View File

@ -1,99 +1,336 @@
{
"objects": [
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\n \"filter\": [],\n \"query\": {\n \"query\": \"\",\n \"language\": \"lucene\"\n }\n}"
},
"savedSearchId": "d382f5b0-c1c6-11e7-8995-936807a28b16",
"title": "Error Codes [Auditbeat Auditd Executions]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\n \"title\": \"Error Codes [Auditbeat Auditd Executions]\",\n \"type\": \"pie\",\n \"params\": {\n \"type\": \"pie\",\n \"addTooltip\": true,\n \"addLegend\": true,\n \"legendPosition\": \"right\",\n \"isDonut\": true\n },\n \"aggs\": [\n {\n \"id\": \"1\",\n \"enabled\": true,\n \"type\": \"count\",\n \"schema\": \"metric\",\n \"params\": {}\n },\n {\n \"id\": \"2\",\n \"enabled\": true,\n \"type\": \"terms\",\n \"schema\": \"segment\",\n \"params\": {\n \"field\": \"auditd.data.exit\",\n \"exclude\": \"0\",\n \"size\": 10,\n \"order\": \"desc\",\n \"orderBy\": \"1\"\n }\n }\n ]\n}"
},
"id": "20a8e8d0-c1c8-11e7-8995-936807a28b16",
"type": "visualization",
"updated_at": "2018-01-16T22:10:23.921Z",
"version": 4
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\n \"index\": \"auditbeat-*\",\n \"filter\": [],\n \"query\": {\n \"query\": \"\",\n \"language\": \"lucene\"\n }\n}"
},
"title": "Primary Username Tag Cloud [Auditbeat Auditd]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\n \"title\": \"Primary Username Tag Cloud [Auditbeat Auditd]\",\n \"type\": \"tagcloud\",\n \"params\": {\n \"scale\": \"linear\",\n \"orientation\": \"single\",\n \"minFontSize\": 18,\n \"maxFontSize\": 45\n },\n \"aggs\": [\n {\n \"id\": \"1\",\n \"enabled\": true,\n \"type\": \"count\",\n \"schema\": \"metric\",\n \"params\": {}\n },\n {\n \"id\": \"2\",\n \"enabled\": true,\n \"type\": \"terms\",\n \"schema\": \"segment\",\n \"params\": {\n \"field\": \"auditd.summary.actor.primary\",\n \"size\": 10,\n \"order\": \"desc\",\n \"orderBy\": \"1\"\n }\n }\n ]\n}"
},
"id": "f81a6de0-c1c1-11e7-8995-936807a28b16",
"type": "visualization",
"updated_at": "2018-01-16T22:12:18.730Z",
"version": 3
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"
},
"savedSearchId": "d382f5b0-c1c6-11e7-8995-936807a28b16",
"title": "Exe Name Tag Cloud [Auditbeat Auditd Executions]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"title\":\"Exe Name Tag Cloud [Auditbeat Auditd Executions]\",\"type\":\"tagcloud\",\"params\":{\"scale\":\"linear\",\"orientation\":\"single\",\"minFontSize\":14,\"maxFontSize\":45},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"process.exe\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\"}}]}"
},
"id": "2efac370-c1ca-11e7-8995-936807a28b16",
"type": "visualization",
"updated_at": "2018-01-16T22:57:41.411Z",
"version": 4
},
{
"attributes": {
"columns": [
"beat.hostname",
"process.args",
"auditd.summary.actor.primary",
"auditd.summary.actor.secondary",
"process.exe"
],
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"highlightAll\":true,\"version\":true,\"query\":{\"language\":\"lucene\",\"query\":\"*\"},\"filter\":[{\"$state\":{\"store\":\"appState\"},\"meta\":{\"alias\":null,\"disabled\":false,\"index\":\"auditbeat-*\",\"key\":\"event.module\",\"negate\":false,\"params\":{\"query\":\"auditd\",\"type\":\"phrase\"},\"type\":\"phrase\",\"value\":\"auditd\"},\"query\":{\"match\":{\"event.module\":{\"query\":\"auditd\",\"type\":\"phrase\"}}}},{\"$state\":{\"store\":\"appState\"},\"meta\":{\"alias\":null,\"disabled\":false,\"index\":\"auditbeat-*\",\"key\":\"event.action\",\"negate\":false,\"params\":{\"query\":\"executed\",\"type\":\"phrase\"},\"type\":\"phrase\",\"value\":\"executed\"},\"query\":{\"match\":{\"event.action\":{\"query\":\"executed\",\"type\":\"phrase\"}}}}]}"
},
"sort": [
"@timestamp",
"desc"
],
"title": "Process Executions [Auditbeat Auditd]",
"version": 1
},
"id": "d382f5b0-c1c6-11e7-8995-936807a28b16",
"type": "search",
"updated_at": "2018-01-16T22:26:35.050Z",
"version": 5
},
{
"attributes": {
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[],\"highlightAll\":true,\"version\":true}"
},
"optionsJSON": "{\"darkTheme\":false,\"useMargins\":false}",
"panelsJSON": "[{\"gridData\":{\"h\":3,\"i\":\"1\",\"w\":4,\"x\":4,\"y\":0},\"id\":\"20a8e8d0-c1c8-11e7-8995-936807a28b16\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"3\",\"w\":4,\"x\":8,\"y\":0},\"id\":\"f81a6de0-c1c1-11e7-8995-936807a28b16\",\"panelIndex\":\"3\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"5\",\"w\":4,\"x\":0,\"y\":0},\"id\":\"2efac370-c1ca-11e7-8995-936807a28b16\",\"panelIndex\":\"5\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":5,\"i\":\"6\",\"w\":12,\"x\":0,\"y\":3},\"id\":\"d382f5b0-c1c6-11e7-8995-936807a28b16\",\"panelIndex\":\"6\",\"type\":\"search\",\"version\":\"6.2.4\"}]",
"timeRestore": false,
"title": "[Auditbeat Auditd] Executions",
"version": 1
},
"id": "7de391b0-c1ca-11e7-8995-936807a28b16",
"type": "dashboard",
"updated_at": "2018-01-16T22:58:11.243Z",
"version": 5
}
],
"version": "6.2.4"
}
"objects": [
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": {
"filter": [],
"query": {
"language": "lucene",
"query": ""
}
}
},
"savedSearchId": "d382f5b0-c1c6-11e7-8995-936807a28b16",
"title": "Error Codes [Auditbeat Auditd Executions]",
"uiStateJSON": {},
"version": 1,
"visState": {
"aggs": [
{
"enabled": true,
"id": "1",
"params": {},
"schema": "metric",
"type": "count"
},
{
"enabled": true,
"id": "2",
"params": {
"exclude": "0",
"field": "auditd.data.exit",
"order": "desc",
"orderBy": "1",
"size": 10
},
"schema": "segment",
"type": "terms"
}
],
"params": {
"addLegend": true,
"addTooltip": true,
"isDonut": true,
"legendPosition": "right",
"type": "pie"
},
"title": "Error Codes [Auditbeat Auditd Executions]",
"type": "pie"
}
},
"id": "20a8e8d0-c1c8-11e7-8995-936807a28b16",
"type": "visualization",
"updated_at": "2018-01-16T22:10:23.921Z",
"version": 4
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": {
"filter": [],
"index": "auditbeat-*",
"query": {
"language": "lucene",
"query": ""
}
}
},
"title": "Primary Username Tag Cloud [Auditbeat Auditd]",
"uiStateJSON": {},
"version": 1,
"visState": {
"aggs": [
{
"enabled": true,
"id": "1",
"params": {},
"schema": "metric",
"type": "count"
},
{
"enabled": true,
"id": "2",
"params": {
"field": "auditd.summary.actor.primary",
"order": "desc",
"orderBy": "1",
"size": 10
},
"schema": "segment",
"type": "terms"
}
],
"params": {
"maxFontSize": 45,
"minFontSize": 18,
"orientation": "single",
"scale": "linear"
},
"title": "Primary Username Tag Cloud [Auditbeat Auditd]",
"type": "tagcloud"
}
},
"id": "f81a6de0-c1c1-11e7-8995-936807a28b16",
"type": "visualization",
"updated_at": "2018-01-16T22:12:18.730Z",
"version": 3
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": {
"filter": [],
"query": {
"language": "lucene",
"query": ""
}
}
},
"savedSearchId": "d382f5b0-c1c6-11e7-8995-936807a28b16",
"title": "Exe Name Tag Cloud [Auditbeat Auditd Executions]",
"uiStateJSON": {},
"version": 1,
"visState": {
"aggs": [
{
"enabled": true,
"id": "1",
"params": {},
"schema": "metric",
"type": "count"
},
{
"enabled": true,
"id": "2",
"params": {
"field": "process.exe",
"order": "desc",
"orderBy": "1",
"size": 10
},
"schema": "segment",
"type": "terms"
}
],
"params": {
"maxFontSize": 45,
"minFontSize": 14,
"orientation": "single",
"scale": "linear"
},
"title": "Exe Name Tag Cloud [Auditbeat Auditd Executions]",
"type": "tagcloud"
}
},
"id": "2efac370-c1ca-11e7-8995-936807a28b16",
"type": "visualization",
"updated_at": "2018-01-16T22:57:41.411Z",
"version": 4
},
{
"attributes": {
"columns": [
"beat.hostname",
"process.args",
"auditd.summary.actor.primary",
"auditd.summary.actor.secondary",
"process.exe"
],
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": {
"filter": [
{
"$state": {
"store": "appState"
},
"meta": {
"alias": null,
"disabled": false,
"index": "auditbeat-*",
"key": "event.module",
"negate": false,
"params": {
"query": "auditd",
"type": "phrase"
},
"type": "phrase",
"value": "auditd"
},
"query": {
"match": {
"event.module": {
"query": "auditd",
"type": "phrase"
}
}
}
},
{
"$state": {
"store": "appState"
},
"meta": {
"alias": null,
"disabled": false,
"index": "auditbeat-*",
"key": "event.action",
"negate": false,
"params": {
"query": "executed",
"type": "phrase"
},
"type": "phrase",
"value": "executed"
},
"query": {
"match": {
"event.action": {
"query": "executed",
"type": "phrase"
}
}
}
}
],
"highlightAll": true,
"index": "auditbeat-*",
"query": {
"language": "lucene",
"query": "*"
},
"version": true
}
},
"sort": [
"@timestamp",
"desc"
],
"title": "Process Executions [Auditbeat Auditd]",
"version": 1
},
"id": "d382f5b0-c1c6-11e7-8995-936807a28b16",
"type": "search",
"updated_at": "2018-01-16T22:26:35.050Z",
"version": 5
},
{
"attributes": {
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": {
"filter": [],
"highlightAll": true,
"query": {
"language": "lucene",
"query": ""
},
"version": true
}
},
"optionsJSON": {
"darkTheme": false,
"useMargins": false
},
"panelsJSON": [
{
"gridData": {
"h": 3,
"i": "1",
"w": 4,
"x": 4,
"y": 0
},
"id": "20a8e8d0-c1c8-11e7-8995-936807a28b16",
"panelIndex": "1",
"type": "visualization",
"version": "6.2.4"
},
{
"gridData": {
"h": 3,
"i": "3",
"w": 4,
"x": 8,
"y": 0
},
"id": "f81a6de0-c1c1-11e7-8995-936807a28b16",
"panelIndex": "3",
"type": "visualization",
"version": "6.2.4"
},
{
"gridData": {
"h": 3,
"i": "5",
"w": 4,
"x": 0,
"y": 0
},
"id": "2efac370-c1ca-11e7-8995-936807a28b16",
"panelIndex": "5",
"type": "visualization",
"version": "6.2.4"
},
{
"gridData": {
"h": 5,
"i": "6",
"w": 12,
"x": 0,
"y": 3
},
"id": "d382f5b0-c1c6-11e7-8995-936807a28b16",
"panelIndex": "6",
"type": "search",
"version": "6.2.4"
}
],
"timeRestore": false,
"title": "[Auditbeat Auditd] Executions",
"version": 1
},
"id": "7de391b0-c1ca-11e7-8995-936807a28b16",
"type": "dashboard",
"updated_at": "2018-01-16T22:58:11.243Z",
"version": 5
}
],
"version": "6.2.4"
}

View File

@ -1,86 +1,283 @@
{
"objects": [
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{}"
},
"title": "Event Actions [Auditbeat Auditd Overview]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\n \"title\": \"Event Actions [Auditbeat Auditd Overview]\",\n \"type\": \"metrics\",\n \"params\": {\n \"id\": \"61ca57f0-469d-11e7-af02-69e470af7417\",\n \"type\": \"timeseries\",\n \"series\": [\n {\n \"id\": \"61ca57f1-469d-11e7-af02-69e470af7417\",\n \"color\": \"#68BC00\",\n \"split_mode\": \"terms\",\n \"metrics\": [\n {\n \"id\": \"6b9fb2d0-c1bc-11e7-938f-ab0645b6c431\",\n \"type\": \"count\"\n }\n ],\n \"seperate_axis\": 0,\n \"axis_position\": \"right\",\n \"formatter\": \"number\",\n \"chart_type\": \"line\",\n \"line_width\": 1,\n \"point_size\": 1,\n \"fill\": 0.5,\n \"stacked\": \"none\",\n \"terms_field\": \"event.action\",\n \"label\": \"Actions\"\n }\n ],\n \"time_field\": \"@timestamp\",\n \"index_pattern\": \"auditbeat-*\",\n \"interval\": \"auto\",\n \"axis_position\": \"left\",\n \"axis_formatter\": \"number\",\n \"show_legend\": 1,\n \"show_grid\": 1,\n \"filter\": \"event.module:auditd\",\n \"background_color_rules\": [\n {\n \"id\": \"58c95a20-c1bd-11e7-938f-ab0645b6c431\"\n }\n ],\n \"bar_color_rules\": [\n {\n \"id\": \"5bfc71a0-c1bd-11e7-938f-ab0645b6c431\"\n }\n ],\n \"gauge_color_rules\": [\n {\n \"id\": \"5d20a650-c1bd-11e7-938f-ab0645b6c431\"\n }\n ],\n \"gauge_width\": 10,\n \"gauge_inner_width\": 10,\n \"gauge_style\": \"half\",\n \"legend_position\": \"left\"\n },\n \"aggs\": []\n}"
},
"id": "97680df0-c1c0-11e7-8995-936807a28b16",
"type": "visualization",
"updated_at": "2018-01-16T22:11:01.438Z",
"version": 3
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\n \"index\": \"auditbeat-*\",\n \"filter\": [],\n \"query\": {\n \"query\": \"\",\n \"language\": \"lucene\"\n }\n}"
},
"savedSearchId": "0f10c430-c1c3-11e7-8995-936807a28b16",
"title": "Event Categories [Auditbeat Auditd]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\n \"title\": \"Event Categories [Auditbeat Auditd]\",\n \"type\": \"pie\",\n \"params\": {\n \"type\": \"pie\",\n \"addTooltip\": true,\n \"addLegend\": true,\n \"legendPosition\": \"right\",\n \"isDonut\": true\n },\n \"aggs\": [\n {\n \"id\": \"1\",\n \"enabled\": true,\n \"type\": \"count\",\n \"schema\": \"metric\",\n \"params\": {}\n },\n {\n \"id\": \"2\",\n \"enabled\": true,\n \"type\": \"terms\",\n \"schema\": \"segment\",\n \"params\": {\n \"field\": \"event.category\",\n \"size\": 5,\n \"order\": \"desc\",\n \"orderBy\": \"1\",\n \"customLabel\": \"Category\"\n }\n },\n {\n \"id\": \"3\",\n \"enabled\": true,\n \"type\": \"terms\",\n \"schema\": \"segment\",\n \"params\": {\n \"field\": \"event.action\",\n \"size\": 20,\n \"order\": \"desc\",\n \"orderBy\": \"1\",\n \"customLabel\": \"Action\"\n }\n }\n ]\n}"
},
"id": "08679220-c25a-11e7-8692-232bd1143e8a",
"type": "visualization",
"updated_at": "2018-01-16T22:54:10.330Z",
"version": 4
},
{
"attributes": {
"columns": [
"beat.hostname",
"auditd.summary.actor.primary",
"auditd.summary.actor.secondary",
"event.action",
"auditd.summary.object.type",
"auditd.summary.object.primary",
"auditd.summary.object.secondary",
"auditd.summary.how",
"auditd.result"
],
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"auditbeat-*\",\"highlightAll\":true,\"version\":true,\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"filter\":[{\"meta\":{\"negate\":false,\"index\":\"auditbeat-*\",\"type\":\"phrase\",\"key\":\"event.module\",\"value\":\"auditd\",\"params\":{\"query\":\"auditd\",\"type\":\"phrase\"},\"disabled\":false,\"alias\":null},\"query\":{\"match\":{\"event.module\":{\"query\":\"auditd\",\"type\":\"phrase\"}}},\"$state\":{\"store\":\"appState\"}}]}"
},
"sort": [
"@timestamp",
"desc"
],
"title": "Audit Event Table [Auditbeat Auditd]",
"version": 1
},
"id": "0f10c430-c1c3-11e7-8995-936807a28b16",
"type": "search",
"updated_at": "2018-01-16T22:51:24.572Z",
"version": 4
},
{
"attributes": {
"description": "Summary of Linux kernel audit events.",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[],\"highlightAll\":true,\"version\":true}"
},
"optionsJSON": "{\"darkTheme\":false,\"useMargins\":false}",
"panelsJSON": "[{\"gridData\":{\"h\":3,\"i\":\"1\",\"w\":7,\"x\":0,\"y\":0},\"id\":\"97680df0-c1c0-11e7-8995-936807a28b16\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"4\",\"w\":5,\"x\":7,\"y\":0},\"id\":\"08679220-c25a-11e7-8692-232bd1143e8a\",\"panelIndex\":\"4\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":5,\"i\":\"5\",\"w\":12,\"x\":0,\"y\":3},\"id\":\"0f10c430-c1c3-11e7-8995-936807a28b16\",\"panelIndex\":\"5\",\"type\":\"search\",\"version\":\"6.2.4\"}]",
"timeRestore": false,
"title": "[Auditbeat Auditd] Overview",
"version": 1
},
"id": "c0ac2c00-c1c0-11e7-8995-936807a28b16",
"type": "dashboard",
"updated_at": "2018-01-16T22:55:17.775Z",
"version": 5
}
],
"version": "6.2.4"
}
"objects": [
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": {}
},
"title": "Event Actions [Auditbeat Auditd Overview]",
"uiStateJSON": {},
"version": 1,
"visState": {
"aggs": [],
"params": {
"axis_formatter": "number",
"axis_position": "left",
"background_color_rules": [
{
"id": "58c95a20-c1bd-11e7-938f-ab0645b6c431"
}
],
"bar_color_rules": [
{
"id": "5bfc71a0-c1bd-11e7-938f-ab0645b6c431"
}
],
"filter": "event.module:auditd",
"gauge_color_rules": [
{
"id": "5d20a650-c1bd-11e7-938f-ab0645b6c431"
}
],
"gauge_inner_width": 10,
"gauge_style": "half",
"gauge_width": 10,
"id": "61ca57f0-469d-11e7-af02-69e470af7417",
"index_pattern": "auditbeat-*",
"interval": "auto",
"legend_position": "left",
"series": [
{
"axis_position": "right",
"chart_type": "line",
"color": "#68BC00",
"fill": 0.5,
"formatter": "number",
"id": "61ca57f1-469d-11e7-af02-69e470af7417",
"label": "Actions",
"line_width": 1,
"metrics": [
{
"id": "6b9fb2d0-c1bc-11e7-938f-ab0645b6c431",
"type": "count"
}
],
"point_size": 1,
"seperate_axis": 0,
"split_mode": "terms",
"stacked": "none",
"terms_field": "event.action"
}
],
"show_grid": 1,
"show_legend": 1,
"time_field": "@timestamp",
"type": "timeseries"
},
"title": "Event Actions [Auditbeat Auditd Overview]",
"type": "metrics"
}
},
"id": "97680df0-c1c0-11e7-8995-936807a28b16",
"type": "visualization",
"updated_at": "2018-01-16T22:11:01.438Z",
"version": 3
},
{
"attributes": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": {
"filter": [],
"index": "auditbeat-*",
"query": {
"language": "lucene",
"query": ""
}
}
},
"savedSearchId": "0f10c430-c1c3-11e7-8995-936807a28b16",
"title": "Event Categories [Auditbeat Auditd]",
"uiStateJSON": {},
"version": 1,
"visState": {
"aggs": [
{
"enabled": true,
"id": "1",
"params": {},
"schema": "metric",
"type": "count"
},
{
"enabled": true,
"id": "2",
"params": {
"customLabel": "Category",
"field": "event.category",
"order": "desc",
"orderBy": "1",
"size": 5
},
"schema": "segment",
"type": "terms"
},
{
"enabled": true,
"id": "3",
"params": {
"customLabel": "Action",
"field": "event.action",
"order": "desc",
"orderBy": "1",
"size": 20
},
"schema": "segment",
"type": "terms"
}
],
"params": {
"addLegend": true,
"addTooltip": true,
"isDonut": true,
"legendPosition": "right",
"type": "pie"
},
"title": "Event Categories [Auditbeat Auditd]",
"type": "pie"
}
},
"id": "08679220-c25a-11e7-8692-232bd1143e8a",
"type": "visualization",
"updated_at": "2018-01-16T22:54:10.330Z",
"version": 4
},
{
"attributes": {
"columns": [
"beat.hostname",
"auditd.summary.actor.primary",
"auditd.summary.actor.secondary",
"event.action",
"auditd.summary.object.type",
"auditd.summary.object.primary",
"auditd.summary.object.secondary",
"auditd.summary.how",
"auditd.result"
],
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": {
"filter": [
{
"$state": {
"store": "appState"
},
"meta": {
"alias": null,
"disabled": false,
"index": "auditbeat-*",
"key": "event.module",
"negate": false,
"params": {
"query": "auditd",
"type": "phrase"
},
"type": "phrase",
"value": "auditd"
},
"query": {
"match": {
"event.module": {
"query": "auditd",
"type": "phrase"
}
}
}
}
],
"highlightAll": true,
"index": "auditbeat-*",
"query": {
"language": "lucene",
"query": ""
},
"version": true
}
},
"sort": [
"@timestamp",
"desc"
],
"title": "Audit Event Table [Auditbeat Auditd]",
"version": 1
},
"id": "0f10c430-c1c3-11e7-8995-936807a28b16",
"type": "search",
"updated_at": "2018-01-16T22:51:24.572Z",
"version": 4
},
{
"attributes": {
"description": "Summary of Linux kernel audit events.",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": {
"filter": [],
"highlightAll": true,
"query": {
"language": "lucene",
"query": ""
},
"version": true
}
},
"optionsJSON": {
"darkTheme": false,
"useMargins": false
},
"panelsJSON": [
{
"gridData": {
"h": 3,
"i": "1",
"w": 7,
"x": 0,
"y": 0
},
"id": "97680df0-c1c0-11e7-8995-936807a28b16",
"panelIndex": "1",
"type": "visualization",
"version": "6.2.4"
},
{
"gridData": {
"h": 3,
"i": "4",
"w": 5,
"x": 7,
"y": 0
},
"id": "08679220-c25a-11e7-8692-232bd1143e8a",
"panelIndex": "4",
"type": "visualization",
"version": "6.2.4"
},
{
"gridData": {
"h": 5,
"i": "5",
"w": 12,
"x": 0,
"y": 3
},
"id": "0f10c430-c1c3-11e7-8995-936807a28b16",
"panelIndex": "5",
"type": "search",
"version": "6.2.4"
}
],
"timeRestore": false,
"title": "[Auditbeat Auditd] Overview",
"version": 1
},
"id": "c0ac2c00-c1c0-11e7-8995-936807a28b16",
"type": "dashboard",
"updated_at": "2018-01-16T22:55:17.775Z",
"version": 5
}
],
"version": "6.2.4"
}

View File

@ -1,10 +1,29 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package auditd
import (
"fmt"
"os"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"time"
@ -18,6 +37,7 @@ import (
"github.com/elastic/go-libaudit"
"github.com/elastic/go-libaudit/aucoalesce"
"github.com/elastic/go-libaudit/auparse"
"github.com/elastic/go-libaudit/rule"
)
const (
@ -27,11 +47,25 @@ const (
unicast = "unicast"
multicast = "multicast"
lostEventsUpdateInterval = time.Second * 15
maxDefaultStreamBufferConsumers = 4
)
type backpressureStrategy uint8
const (
bsKernel backpressureStrategy = 1 << iota
bsUserSpace
bsAuto
)
var (
auditdMetrics = monitoring.Default.NewRegistry(moduleName)
lostMetric = monitoring.NewInt(auditdMetrics, "lost")
auditdMetrics = monitoring.Default.NewRegistry(moduleName)
reassemblerGapsMetric = monitoring.NewInt(auditdMetrics, "reassembler_seq_gaps")
kernelLostMetric = monitoring.NewInt(auditdMetrics, "kernel_lost")
userspaceLostMetric = monitoring.NewInt(auditdMetrics, "userspace_lost")
receivedMetric = monitoring.NewInt(auditdMetrics, "received_msgs")
)
func init() {
@ -48,9 +82,14 @@ func init() {
// does not rely on polling.
type MetricSet struct {
mb.BaseMetricSet
config Config
client *libaudit.AuditClient
log *logp.Logger
config Config
client *libaudit.AuditClient
log *logp.Logger
kernelLost struct {
enabled bool
counter uint32
}
backpressureStrategy backpressureStrategy
}
// New constructs a new MetricSet.
@ -69,13 +108,17 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
return nil, errors.Wrap(err, "failed to create audit client")
}
lostMetric.Set(0)
reassemblerGapsMetric.Set(0)
kernelLostMetric.Set(0)
userspaceLostMetric.Set(0)
receivedMetric.Set(0)
return &MetricSet{
BaseMetricSet: base,
client: client,
config: config,
log: log,
BaseMetricSet: base,
client: client,
config: config,
log: log,
backpressureStrategy: getBackpressureStrategy(config.BackpressureStrategy, log),
}, nil
}
@ -111,21 +154,61 @@ func (ms *MetricSet) Run(reporter mb.PushReporterV2) {
return
}
for {
select {
case <-reporter.Done():
return
case msgs := <-out:
reporter.Event(buildMetricbeatEvent(msgs, ms.config))
if ms.kernelLost.enabled {
client, err := libaudit.NewAuditClient(nil)
if err != nil {
reporter.Error(err)
ms.log.Errorw("Failure creating audit monitoring client", "error", err)
}
go func() {
defer client.Close()
timer := time.NewTicker(lostEventsUpdateInterval)
defer timer.Stop()
for {
select {
case <-reporter.Done():
return
case <-timer.C:
if status, err := client.GetStatus(); err == nil {
ms.updateKernelLostMetric(status.Lost)
} else {
ms.log.Error("get status request failed:", err)
}
}
}
}()
}
// Spawn the stream buffer consumers
numConsumers := ms.config.StreamBufferConsumers
// By default (stream_buffer_consumers=0) use as many consumers as local CPUs
// with a max of `maxDefaultStreamBufferConsumers`
if numConsumers == 0 {
if numConsumers = runtime.GOMAXPROCS(-1); numConsumers > maxDefaultStreamBufferConsumers {
numConsumers = maxDefaultStreamBufferConsumers
}
}
var wg sync.WaitGroup
wg.Add(numConsumers)
for i := 0; i < numConsumers; i++ {
go func() {
defer wg.Done()
for {
select {
case <-reporter.Done():
return
case msgs := <-out:
reporter.Event(buildMetricbeatEvent(msgs, ms.config))
}
}
}()
}
wg.Wait()
}
func (ms *MetricSet) addRules(reporter mb.PushReporterV2) error {
rules, err := ms.config.rules()
if err != nil {
return errors.Wrap(err, "failed to add rules")
}
rules := ms.config.rules()
if len(rules) == 0 {
ms.log.Info("No audit_rules were specified.")
@ -157,6 +240,12 @@ func (ms *MetricSet) addRules(reporter mb.PushReporterV2) error {
}
ms.log.Infof("Deleted %v pre-existing audit rules.", n)
// Add rule to ignore syscalls from this process
if rule, err := buildPIDIgnoreRule(os.Getpid()); err == nil {
rules = append([]auditRule{rule}, rules...)
} else {
ms.log.Errorf("Failed to build a rule to ignore self: %v", err)
}
// Add rules from config.
var failCount int
for _, rule := range rules {
@ -189,6 +278,9 @@ func (ms *MetricSet) initClient() error {
if err != nil {
return errors.Wrap(err, "failed to get audit status")
}
ms.kernelLost.enabled = true
ms.kernelLost.counter = status.Lost
ms.log.Infow("audit status from kernel at start", "audit_status", status)
if status.Enabled == auditLocked {
@ -201,18 +293,44 @@ func (ms *MetricSet) initClient() error {
}
}
if status.RateLimit != ms.config.RateLimit {
if err = ms.client.SetRateLimit(ms.config.RateLimit, libaudit.NoWait); err != nil {
return errors.Wrap(err, "failed to set audit rate limit in kernel")
}
}
if status.BacklogLimit != ms.config.BacklogLimit {
if err = ms.client.SetBacklogLimit(ms.config.BacklogLimit, libaudit.NoWait); err != nil {
return errors.Wrap(err, "failed to set audit backlog limit in kernel")
}
}
if ms.backpressureStrategy&(bsKernel|bsAuto) != 0 {
// "kernel" backpressure mitigation strategy
//
// configure the kernel to drop audit events immediately if the
// backlog queue is full.
if status.FeatureBitmap&libaudit.AuditFeatureBitmapBacklogWaitTime != 0 {
ms.log.Info("Setting kernel backlog wait time to prevent backpressure propagating to the kernel.")
if err = ms.client.SetBacklogWaitTime(0, libaudit.NoWait); err != nil {
return errors.Wrap(err, "failed to set audit backlog wait time in kernel")
}
} else {
if ms.backpressureStrategy == bsAuto {
ms.log.Warn("setting backlog wait time is not supported in this kernel. Enabling workaround.")
ms.backpressureStrategy |= bsUserSpace
} else {
return errors.New("kernel backlog wait time not supported by kernel, but required by backpressure_strategy")
}
}
}
if ms.backpressureStrategy&(bsKernel|bsUserSpace) == bsUserSpace && ms.config.RateLimit == 0 {
// force a rate limit if the user-space strategy will be used without
// corresponding backlog_wait_time setting in the kernel
ms.config.RateLimit = 5000
}
if status.RateLimit != ms.config.RateLimit {
if err = ms.client.SetRateLimit(ms.config.RateLimit, libaudit.NoWait); err != nil {
return errors.Wrap(err, "failed to set audit rate limit in kernel")
}
}
if status.Enabled == 0 {
if err = ms.client.SetEnabled(true, libaudit.NoWait); err != nil {
return errors.Wrap(err, "failed to enable auditing in the kernel")
@ -230,32 +348,65 @@ func (ms *MetricSet) initClient() error {
return nil
}
func (ms *MetricSet) updateKernelLostMetric(lost uint32) {
if !ms.kernelLost.enabled {
return
}
delta := int64(lost - ms.kernelLost.counter)
if delta >= 0 {
logFn := ms.log.Debugf
if delta > 0 {
logFn = ms.log.Infof
kernelLostMetric.Add(delta)
}
logFn("kernel lost events: %d (total: %d)", delta, lost)
} else {
ms.log.Warnf("kernel lost event counter reset from %d to %d", ms.kernelLost, lost)
}
ms.kernelLost.counter = lost
}
func (ms *MetricSet) receiveEvents(done <-chan struct{}) (<-chan []*auparse.AuditMessage, error) {
if err := ms.initClient(); err != nil {
return nil, err
}
out := make(chan []*auparse.AuditMessage, ms.config.StreamBufferQueueSize)
reassembler, err := libaudit.NewReassembler(int(ms.config.ReassemblerMaxInFlight), ms.config.ReassemblerTimeout, &stream{done, out})
var st libaudit.Stream = &stream{done, out}
if ms.backpressureStrategy&bsUserSpace != 0 {
// "user-space" backpressure mitigation strategy
//
// Consume events from our side as fast as possible, by dropping events
// if the publishing pipeline would block.
ms.log.Info("Using non-blocking stream to prevent backpressure propagating to the kernel.")
st = &nonBlockingStream{done, out}
}
reassembler, err := libaudit.NewReassembler(int(ms.config.ReassemblerMaxInFlight), ms.config.ReassemblerTimeout, st)
if err != nil {
return nil, errors.Wrap(err, "failed to create Reassembler")
}
go maintain(done, reassembler)
go func() {
defer ms.log.Debug("receiveEvents goroutine exited")
defer close(out)
defer reassembler.Close()
for {
raw, err := ms.client.Receive(false)
if err != nil {
if errors.Cause(err) == syscall.EBADF {
// Client has been closed.
break
}
continue
}
if filterRecordType(raw.Type) {
continue
}
receivedMetric.Inc()
if err := reassembler.Push(raw.Type, raw.Data); err != nil {
ms.log.Debugw("Dropping audit message",
"record_type", raw.Type,
@ -289,8 +440,14 @@ func maintain(done <-chan struct{}, reassembler *libaudit.Reassembler) {
}
func filterRecordType(typ auparse.AuditMessageType) bool {
switch {
// REPLACE messages are tests to check if Auditbeat is still healthy by
// seeing if unicast messages can be sent without error from the kernel.
// Ignore them.
case typ == auparse.AUDIT_REPLACE:
return true
// Messages from 1300-2999 are valid audit message types.
if typ < auparse.AUDIT_USER_AUTH || typ > auparse.AUDIT_LAST_USER_MSG2 {
case typ < auparse.AUDIT_USER_AUTH || typ > auparse.AUDIT_LAST_USER_MSG2:
return true
}
@ -531,7 +688,26 @@ func (s *stream) ReassemblyComplete(msgs []*auparse.AuditMessage) {
}
func (s *stream) EventsLost(count int) {
lostMetric.Inc()
reassemblerGapsMetric.Add(int64(count))
}
// nonBlockingStream behaves as stream above, except that it will never block
// on backpressure from the publishing pipeline.
// Instead, events will be discarded.
type nonBlockingStream stream
func (s *nonBlockingStream) ReassemblyComplete(msgs []*auparse.AuditMessage) {
select {
case <-s.done:
return
case s.out <- msgs:
default:
userspaceLostMetric.Add(int64(len(msgs)))
}
}
func (s *nonBlockingStream) EventsLost(count int) {
(*stream)(s).EventsLost(count)
}
func hasMulticastSupport() bool {
@ -605,7 +781,7 @@ func determineSocketType(c *Config, log *logp.Logger) (string, error) {
}
return c.SocketType, nil
}
rules, _ := c.rules()
rules := c.rules()
isLocked := status.Enabled == auditLocked
hasMulticast := hasMulticastSupport()
@ -663,3 +839,44 @@ func determineSocketType(c *Config, log *logp.Logger) (string, error) {
}
}
func getBackpressureStrategy(value string, logger *logp.Logger) backpressureStrategy {
switch value {
case "kernel":
return bsKernel
case "userspace", "user-space":
return bsUserSpace
case "auto":
return bsAuto
case "both":
return bsKernel | bsUserSpace
case "none":
return 0
default:
logger.Warn("Unknown value for the 'backpressure_strategy' option. Using default.")
fallthrough
case "", "default":
return bsAuto
}
}
func buildPIDIgnoreRule(pid int) (ruleData auditRule, err error) {
r := rule.SyscallRule{
Type: rule.AppendSyscallRuleType,
List: "exit",
Action: "never",
Filters: []rule.FilterSpec{
{
Type: rule.ValueFilterType,
LHS: "pid",
Comparator: "=",
RHS: strconv.Itoa(pid),
},
},
Syscalls: []string{"all"},
Keys: nil,
}
ruleData.flags = fmt.Sprintf("-A exit,never -F pid=%d -S all", pid)
ruleData.data, err = rule.Build(&r)
return ruleData, err
}

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package auditd
import (
@ -16,6 +33,7 @@ import (
"github.com/elastic/beats/auditbeat/core"
"github.com/elastic/beats/libbeat/logp"
"github.com/elastic/beats/metricbeat/mb"
mbtest "github.com/elastic/beats/metricbeat/mb/testing"
"github.com/elastic/go-libaudit"
"github.com/elastic/go-libaudit/auparse"
@ -56,7 +74,7 @@ func TestData(t *testing.T) {
// Get Status response for initClient
returnACK().returnStatus().
// Send expected ACKs for initialization
returnACK().returnACK().returnACK().returnACK().
returnACK().returnACK().returnACK().returnACK().returnACK().
// Send a single audit message from the kernel.
returnMessage(userLoginMsg)
@ -67,17 +85,13 @@ func TestData(t *testing.T) {
auditMetricSet.client = &libaudit.AuditClient{Netlink: mock}
events := mbtest.RunPushMetricSetV2(10*time.Second, 1, ms)
for _, e := range events {
if e.Error != nil {
t.Fatalf("received error: %+v", e.Error)
}
}
if len(events) == 0 {
t.Fatal("received no events")
}
assertNoErrors(t, events)
beatEvent := mbtest.StandardizeEvent(ms, events[0], core.AddDatasetToEvent)
mbtest.WriteEventToDataJSON(t, beatEvent)
mbtest.WriteEventToDataJSON(t, beatEvent, "")
}
func getConfig() map[string]interface{} {
@ -110,23 +124,8 @@ func TestUnicastClient(t *testing.T) {
ms := mbtest.NewPushMetricSetV2(t, c)
events := mbtest.RunPushMetricSetV2(5*time.Second, 0, ms)
for _, e := range events {
t.Log(e)
if e.Error != nil {
t.Errorf("received error: %+v", e.Error)
}
}
for _, e := range events {
v, err := e.MetricSetFields.GetValue("thing.primary")
if err == nil {
if exe, ok := v.(string); ok && exe == "/bin/cat" {
return
}
}
}
assert.Fail(t, "expected an execve event for /bin/cat")
assertNoErrors(t, events)
assertHasBinCatExecve(t, events)
}
func TestMulticastClient(t *testing.T) {
@ -155,14 +154,8 @@ func TestMulticastClient(t *testing.T) {
ms := mbtest.NewPushMetricSetV2(t, c)
events := mbtest.RunPushMetricSetV2(5*time.Second, 0, ms)
for _, e := range events {
if e.Error != nil {
t.Fatalf("received error: %+v", e.Error)
}
}
// The number of events is non-deterministic so there is no validation.
t.Logf("received %d messages via multicast", len(events))
assertNoErrors(t, events)
assertHasBinCatExecve(t, events)
}
func TestKernelVersion(t *testing.T) {
@ -224,3 +217,29 @@ func buildSampleEvent(t testing.TB, lines []string, filename string) {
t.Fatal(err)
}
}
func assertHasBinCatExecve(t *testing.T, events []mb.Event) {
t.Helper()
for _, e := range events {
v, err := e.RootFields.GetValue("process.exe")
if err == nil {
if exe, ok := v.(string); ok && exe == "/bin/cat" {
return
}
}
}
assert.Fail(t, "expected an execve event for /bin/cat")
}
func assertNoErrors(t *testing.T, events []mb.Event) {
t.Helper()
for _, e := range events {
t.Log(e)
if e.Error != nil {
t.Errorf("received error: %+v", e.Error)
}
}
}

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// +build !linux
package auditd

View File

@ -1,8 +1,30 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package auditd
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strings"
"time"
@ -14,25 +36,35 @@ import (
)
const (
moduleName = "auditd"
metricsetName = "auditd"
moduleName = "auditd"
metricsetName = "auditd"
recursiveGlobDepth = 8
)
// Config defines the kernel metricset's possible configuration options.
type Config struct {
ResolveIDs bool `config:"resolve_ids"` // Resolve UID/GIDs to names.
FailureMode string `config:"failure_mode"` // Failure mode for the kernel (silent, log, panic).
BacklogLimit uint32 `config:"backlog_limit"` // Max number of message to buffer in the auditd.
RateLimit uint32 `config:"rate_limit"` // Rate limit in messages/sec of messages from auditd.
RawMessage bool `config:"include_raw_message"` // Include the list of raw audit messages in the event.
Warnings bool `config:"include_warnings"` // Include warnings in the event (for dev/debug purposes only).
RulesBlob string `config:"audit_rules"` // Audit rules. One rule per line.
SocketType string `config:"socket_type"` // Socket type to use with the kernel (unicast or multicast).
ResolveIDs bool `config:"resolve_ids"` // Resolve UID/GIDs to names.
FailureMode string `config:"failure_mode"` // Failure mode for the kernel (silent, log, panic).
BacklogLimit uint32 `config:"backlog_limit"` // Max number of message to buffer in the auditd.
RateLimit uint32 `config:"rate_limit"` // Rate limit in messages/sec of messages from auditd.
RawMessage bool `config:"include_raw_message"` // Include the list of raw audit messages in the event.
Warnings bool `config:"include_warnings"` // Include warnings in the event (for dev/debug purposes only).
RulesBlob string `config:"audit_rules"` // Audit rules. One rule per line.
RuleFiles []string `config:"audit_rule_files"` // List of rule files.
SocketType string `config:"socket_type"` // Socket type to use with the kernel (unicast or multicast).
// Tuning options (advanced, use with care)
ReassemblerMaxInFlight uint32 `config:"reassembler.max_in_flight"`
ReassemblerTimeout time.Duration `config:"reassembler.timeout"`
StreamBufferQueueSize uint32 `config:"reassembler.queue_size"`
// BackpressureStrategy defines the strategy used to mitigate backpressure
// propagating to the kernel causing audited processes to block until
// Auditbeat can keep-up.
// One of "user-space", "kernel", "both", "none", "auto" (default)
BackpressureStrategy string `config:"backpressure_strategy"`
StreamBufferConsumers int `config:"stream_buffer_consumers"`
auditRules []auditRule
}
type auditRule struct {
@ -40,10 +72,30 @@ type auditRule struct {
data []byte
}
type ruleWithSource struct {
rule auditRule
source string
}
type ruleSet map[string]ruleWithSource
var defaultConfig = Config{
ResolveIDs: true,
FailureMode: "silent",
BacklogLimit: 8192,
RateLimit: 0,
RawMessage: false,
Warnings: false,
ReassemblerMaxInFlight: 50,
ReassemblerTimeout: 2 * time.Second,
StreamBufferQueueSize: 8192,
StreamBufferConsumers: 0,
}
// Validate validates the rules specified in the config.
func (c *Config) Validate() error {
var errs multierror.Errors
_, err := c.rules()
err := c.loadRules()
if err != nil {
errs = append(errs, err)
}
@ -64,47 +116,46 @@ func (c *Config) Validate() error {
}
// Rules returns a list of rules specified in the config.
func (c Config) rules() ([]auditRule, error) {
var errs multierror.Errors
var auditRules []auditRule
ruleSet := map[string]auditRule{}
s := bufio.NewScanner(bytes.NewBufferString(c.RulesBlob))
for s.Scan() {
line := strings.TrimSpace(s.Text())
if len(line) == 0 || line[0] == '#' {
continue
}
func (c Config) rules() []auditRule {
return c.auditRules
}
// Parse the CLI flags into an intermediate rule specification.
r, err := flags.Parse(line)
func (c *Config) loadRules() error {
var paths []string
for _, pattern := range c.RuleFiles {
absPattern, err := filepath.Abs(pattern)
if err != nil {
errs = append(errs, errors.Wrapf(err, "failed on rule '%v'", line))
continue
return fmt.Errorf("unable to get the absolute path for %s: %v", pattern, err)
}
// Convert rule specification to a binary rule representation.
data, err := rule.Build(r)
files, err := filepath.Glob(absPattern)
if err != nil {
errs = append(errs, errors.Wrapf(err, "failed on rule '%v'", line))
continue
return err
}
// Detect duplicates based on the normalized binary rule representation.
existingRule, found := ruleSet[string(data)]
if found {
errs = append(errs, errors.Errorf("failed on rule '%v' because its a duplicate of '%v'", line, existingRule.flags))
continue
}
auditRule := auditRule{flags: line, data: []byte(data)}
ruleSet[string(data)] = auditRule
auditRules = append(auditRules, auditRule)
sort.Strings(files)
paths = append(paths, files...)
}
if len(errs) > 0 {
return nil, errors.Wrap(errs.Err(), "invalid audit_rules")
knownRules := ruleSet{}
rules, err := readRules(bytes.NewBufferString(c.RulesBlob), "(audit_rules at auditbeat.yml)", knownRules)
if err != nil {
return err
}
return auditRules, nil
c.auditRules = append(c.auditRules, rules...)
for _, filename := range paths {
fHandle, err := os.Open(filename)
if err != nil {
return fmt.Errorf("unable to open rule file '%s': %v", filename, err)
}
rules, err = readRules(fHandle, filename, knownRules)
if err != nil {
return err
}
c.auditRules = append(c.auditRules, rules...)
}
return nil
}
func (c Config) failureMode() (uint32, error) {
@ -120,14 +171,45 @@ func (c Config) failureMode() (uint32, error) {
}
}
var defaultConfig = Config{
ResolveIDs: true,
FailureMode: "silent",
BacklogLimit: 8192,
RateLimit: 0,
RawMessage: false,
Warnings: false,
ReassemblerMaxInFlight: 50,
ReassemblerTimeout: 2 * time.Second,
StreamBufferQueueSize: 64,
func readRules(reader io.Reader, source string, knownRules ruleSet) (rules []auditRule, err error) {
var errs multierror.Errors
s := bufio.NewScanner(reader)
for lineNum := 1; s.Scan(); lineNum++ {
location := fmt.Sprintf("%s:%d", source, lineNum)
line := strings.TrimSpace(s.Text())
if len(line) == 0 || line[0] == '#' {
continue
}
// Parse the CLI flags into an intermediate rule specification.
r, err := flags.Parse(line)
if err != nil {
errs = append(errs, errors.Wrapf(err, "at %s: failed to parse rule '%v'", location, line))
continue
}
// Convert rule specification to a binary rule representation.
data, err := rule.Build(r)
if err != nil {
errs = append(errs, errors.Wrapf(err, "at %s: failed to interpret rule '%v'", location, line))
continue
}
// Detect duplicates based on the normalized binary rule representation.
existing, found := knownRules[string(data)]
if found {
errs = append(errs, errors.Errorf("at %s: rule '%v' is a duplicate of '%v' at %s", location, line, existing.rule.flags, existing.source))
continue
}
rule := auditRule{flags: line, data: []byte(data)}
knownRules[string(data)] = ruleWithSource{rule, location}
rules = append(rules, rule)
}
if len(errs) > 0 {
return nil, errors.Wrap(errs.Err(), "failed loading rules")
}
return rules, nil
}

View File

@ -1,6 +1,28 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package auditd
import (
"fmt"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
"testing"
"github.com/stretchr/testify/assert"
@ -20,10 +42,8 @@ audit_rules: |
if err != nil {
t.Fatal(err)
}
rules, err := config.rules()
if err != nil {
t.Fatal()
}
rules := config.rules()
assert.EqualValues(t, []string{
"-w /etc/passwd -p wa -k auth",
"-a always,exit -S execve -k exec",
@ -73,6 +93,107 @@ func TestConfigValidateConnectionType(t *testing.T) {
t.Log(err)
}
func TestConfigRuleOrdering(t *testing.T) {
const fileMode = 0644
config := defaultConfig
config.RulesBlob = strings.Join([]string{
makeRuleFlags(0, 0),
makeRuleFlags(0, 1),
makeRuleFlags(0, 2),
}, "\n")
dir1, err := ioutil.TempDir("", "rules1")
if err != nil {
t.Fatal(err)
}
for _, file := range []struct {
order int
name string
}{
{0, "00_first.conf"},
{5, "99_last.conf"},
{2, "03_auth.conf"},
{4, "20_exec.conf"},
{3, "10_network_access.conf"},
{1, "01_32bit_abi.conf"},
} {
path := filepath.Join(dir1, file.name)
content := []byte(strings.Join([]string{
makeRuleFlags(1+file.order, 0),
makeRuleFlags(1+file.order, 1),
makeRuleFlags(1+file.order, 2),
makeRuleFlags(1+file.order, 3),
}, "\n"))
if err = ioutil.WriteFile(path, content, fileMode); err != nil {
t.Fatal(err)
}
}
dir2, err := ioutil.TempDir("", "rules0")
if err != nil {
t.Fatal(err)
}
for _, file := range []struct {
order int
name string
}{
{3, "99_tail.conf"},
{0, "00_head.conf"},
{2, "50_mid.conf"},
{1, "13.conf"},
} {
path := filepath.Join(dir2, file.name)
content := []byte(strings.Join([]string{
makeRuleFlags(10+file.order, 0),
makeRuleFlags(10+file.order, 1),
makeRuleFlags(10+file.order, 2),
makeRuleFlags(10+file.order, 3),
}, "\n"))
if err = ioutil.WriteFile(path, content, fileMode); err != nil {
t.Fatal(err)
}
}
config.RuleFiles = []string{
fmt.Sprintf("%s/*.conf", dir1),
fmt.Sprintf("%s/*.conf", dir2),
}
if err = config.Validate(); err != nil {
t.Fatal(err)
}
rules := config.rules()
fileNo, ruleNo := 0, 0
for _, rule := range rules {
parts := strings.Split(rule.flags, " ")
assert.Len(t, parts, 6, rule.flags)
fields := strings.Split(parts[5], ":")
assert.Len(t, fields, 3, rule.flags)
fileID, err := strconv.Atoi(fields[1])
if err != nil {
t.Fatal(err, rule.flags)
}
ruleID, err := strconv.Atoi(fields[2])
if err != nil {
t.Fatal(err, rule.flags)
}
if fileID > fileNo {
fileNo = fileID
ruleNo = 0
}
assert.Equal(t, fileNo, fileID, rule.flags)
assert.Equal(t, ruleNo, ruleID, rule.flags)
ruleNo++
}
}
func makeRuleFlags(fileID, ruleID int) string {
return fmt.Sprintf("-w /path/%d/%d -p rwxa -k rule:%d:%d", fileID, ruleID, fileID, ruleID)
}
func parseConfig(t testing.TB, yaml string) (Config, error) {
c, err := common.NewConfigWithYAML([]byte(yaml), "")
if err != nil {

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Package auditd is a metricset that subscribes to the Linux Audit Framework
// to receive audit events from the the kernel.
package auditd

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package auditd
import (

View File

@ -0,0 +1,139 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package auditd
import (
"fmt"
"os"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/elastic/go-libaudit"
"github.com/elastic/go-libaudit/rule"
"github.com/elastic/beats/auditbeat/cmd"
)
var (
dontResolveIDs bool
noOutputIfEmpty bool
singleLineStatus bool
)
func init() {
showRules := cobra.Command{
Use: "auditd-rules",
Short: "Show currently installed auditd rules",
Aliases: []string{"audit-rules", "audit_rules", "rules", "auditdrules", "auditrules"},
Run: func(cmd *cobra.Command, args []string) {
if err := showAuditdRules(); err != nil {
fmt.Fprintf(os.Stderr, "Failed to show auditd rules: %v\n", err)
os.Exit(1)
}
},
}
showRules.Flags().BoolVarP(&dontResolveIDs, "no-resolve", "n", false, "Don't resolve numeric IDs (UIDs, GIDs and file_type fields)")
showRules.Flags().BoolVarP(&noOutputIfEmpty, "no-output", "z", false, "Don't generate output when the rule list is empty")
showStatus := cobra.Command{
Use: "auditd-status",
Short: "Show kernel auditd status",
Aliases: []string{"audit-status", "audit_status", "status", "auditdstatus", "auditrules"},
Run: func(cmd *cobra.Command, args []string) {
if err := showAuditdStatus(); err != nil {
fmt.Fprintf(os.Stderr, "Failed to show auditd rules: %v\n", err)
os.Exit(1)
}
},
}
showStatus.Flags().BoolVarP(&singleLineStatus, "single-line", "s", false, "Output status as a single line")
cmd.ShowCmd.AddCommand(&showRules, &showStatus)
}
func showAuditdRules() error {
client, err := libaudit.NewAuditClient(nil)
if err != nil {
return errors.Wrap(err, "failed to create audit client")
}
defer client.Close()
rules, err := client.GetRules()
if err != nil {
return errors.Wrap(err, "failed to list existing rules")
}
for idx, raw := range rules {
r, err := rule.ToCommandLine(raw, !dontResolveIDs)
if err != nil {
fmt.Fprintf(os.Stderr, "Error decoding rule %d: %v\n", idx, err)
fmt.Fprintf(os.Stderr, "Raw dump: <<<%v>>>\n", raw)
}
fmt.Println(r)
}
if !noOutputIfEmpty && len(rules) == 0 {
fmt.Println("No rules")
}
return nil
}
func showAuditdStatus() error {
client, err := libaudit.NewAuditClient(nil)
if err != nil {
return errors.Wrap(err, "failed to create audit client")
}
defer client.Close()
status, err := client.GetStatus()
if err != nil {
return errors.Wrap(err, "failed to get audit status")
}
if status.FeatureBitmap == libaudit.AuditFeatureBitmapBacklogWaitTime {
// If FeatureBitmap value is "2", means we're running under an old kernel
// in which FeatureBitmap meant Version. Version 2 supports both
// backlog_wait_time and backlog_limit.
status.FeatureBitmap |= libaudit.AuditFeatureBitmapBacklogLimit
}
separator := '\n'
if singleLineStatus {
separator = ' '
}
fmt.Printf("enabled %d%c"+
"failure %d%c"+
"pid %d%c"+
"rate_limit %d%c"+
"backlog_limit %d%c"+
"lost %d%c"+
"backlog %d%c"+
"backlog_wait_time %d%c"+
"features %s\n",
status.Enabled, separator,
status.Failure, separator,
status.PID, separator,
status.RateLimit, separator,
status.BacklogLimit, separator,
status.Lost, separator,
status.Backlog, separator,
status.BacklogWaitTime, separator,
fmt.Sprintf("%#x", status.FeatureBitmap))
return nil
}

View File

@ -1,9 +1,9 @@
{{ if .reference -}}
{{ if .Reference -}}
# The file integrity module sends events when files are changed (created,
# updated, deleted). The events contain file metadata and hashes.
{{ end -}}
- module: file_integrity
{{ if eq .goos "darwin" -}}
{{ if eq .GOOS "darwin" -}}
paths:
- /bin
- /usr/bin
@ -11,7 +11,7 @@
- /sbin
- /usr/sbin
- /usr/local/sbin
{{ else if eq .goos "windows" -}}
{{ else if eq .GOOS "windows" -}}
paths:
- C:/windows
- C:/windows/system32
@ -25,15 +25,15 @@
- /usr/sbin
- /etc
{{- end }}
{{ if .reference }}
{{ if .Reference }}
# List of regular expressions to filter out notifications for unwanted files.
# Wrap in single quotes to workaround YAML escaping rules. By default no files
# are ignored.
{{ if eq .goos "darwin" -}}
{{ if eq .GOOS "darwin" -}}
exclude_files:
- '\.DS_Store$'
- '\.swp$'
{{ else if eq .goos "windows" -}}
{{ else if eq .GOOS "windows" -}}
exclude_files:
- '(?i)\.lnk$'
- '(?i)\.swp$'
@ -58,7 +58,7 @@
# Hash types to compute when the file changes. Supported types are
# blake2b_256, blake2b_384, blake2b_512, md5, sha1, sha224, sha256, sha384,
# sha512, sha512_224, sha512_256, sha3_224, sha3_256, sha3_384 and sha3_512.
# sha512, sha512_224, sha512_256, sha3_224, sha3_256, sha3_384, sha3_512, and xxh64.
# Default is sha1.
hash_types: [sha1]

View File

@ -96,7 +96,7 @@ a suffix to the value. The supported units are `b` (default), `kib`, `kb`, `mib`
*`hash_types`*:: A list of hash types to compute when the file changes.
The supported hash types are `blake2b_256`, `blake2b_384`, `blake2b_512`, `md5`,
`sha1`, `sha224`, `sha256`, `sha384`, `sha512`, `sha512_224`, `sha512_256`,
`sha3_224`, `sha3_256`, `sha3_384`, and `sha3_512`. The default value is `sha1`.
`sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, and `xxh64`. The default value is `sha1`.
*`recursive`*:: By default, the watches set to the paths specified in
`paths` are not recursive. This means that only changes to the contents

View File

@ -68,3 +68,7 @@
- name: sha512_256
type: keyword
description: SHA512/256 hash of the file.
- name: xxh64
type: keyword
description: XX64 hash of the file.

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package file_integrity
import (

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package file_integrity
import (
@ -27,6 +44,7 @@ var validHashes = []HashType{
SHA1,
SHA224, SHA256, SHA384, SHA512, SHA512_224, SHA512_256,
SHA3_224, SHA3_256, SHA3_384, SHA3_512,
XXH64,
}
// Enum of hash types.
@ -46,6 +64,7 @@ const (
SHA512 HashType = "sha512"
SHA512_224 HashType = "sha512_224"
SHA512_256 HashType = "sha512_256"
XXH64 HashType = "xxh64"
)
// Config contains the configuration parameters for the file integrity

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package file_integrity
import (

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package file_integrity
import (
@ -16,6 +33,7 @@ import (
"strconv"
"time"
"github.com/OneOfOne/xxhash"
"github.com/pkg/errors"
"golang.org/x/crypto/blake2b"
"golang.org/x/crypto/sha3"
@ -39,7 +57,7 @@ func (s Source) String() string {
func (s Source) MarshalText() ([]byte, error) { return []byte(s.String()), nil }
const (
// SourceScan identifies events triggered by a file system scan.
// SourceScan identifies events triggered by a file system scan.
SourceScan Source = iota
// SourceFSNotify identifies events triggered by a notification from the
// file system.
@ -377,6 +395,8 @@ func hashFile(name string, hashType ...HashType) (map[HashType]Digest, error) {
hashes = append(hashes, sha512.New512_224())
case SHA512_256:
hashes = append(hashes, sha512.New512_256())
case XXH64:
hashes = append(hashes, xxhash.New64())
default:
return nil, errors.Errorf("unknown hash type '%v'", name)
}

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package file_integrity
import (
@ -172,6 +189,7 @@ func TestHashFile(t *testing.T) {
SHA3_256: mustDecodeHex("3cb5385a2987ca45888d7877fbcf92b4854f7155ae19c96cecc7ea1300c6f5a4"),
SHA3_384: mustDecodeHex("f19539818b4f29fa0ee599db4113fd81b77cd1119682e6d799a052849d2e40ef0dad84bc947ba2dee742d9731f1b9e9b"),
SHA3_512: mustDecodeHex("f0a2c0f9090c1fd6dedf211192e36a6668d2b3c7f57a35419acb1c4fc7dfffc267bbcd90f5f38676caddcab652f6aacd1ed4e0ad0a8e1e4b98f890b62b6c7c5c"),
XXH64: mustDecodeHex("d3e8573b7abf279a"),
}
f, err := ioutil.TempFile("", "input.txt")

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// +build darwin
package file_integrity

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// +build linux freebsd openbsd netbsd windows
package file_integrity
@ -30,7 +47,6 @@ func NewEventReader(c Config) (EventProducer, error) {
return &reader{
watcher: watcher,
config: c,
eventC: make(chan Event, 1),
log: logp.NewLogger(moduleName),
}, nil
}
@ -39,7 +55,16 @@ func (r *reader) Start(done <-chan struct{}) (<-chan Event, error) {
if err := r.watcher.Start(); err != nil {
return nil, errors.Wrap(err, "unable to start watcher")
}
go r.consumeEvents(done)
queueDone := make(chan struct{})
queueC := make(chan []*Event)
// Launch a separate goroutine to fetch all events that happen while
// watches are being installed.
go func() {
defer close(queueC)
queueC <- r.enqueueEvents(queueDone)
}()
// Windows implementation of fsnotify needs to have the watched paths
// installed after the event consumer is started, to avoid a potential
@ -56,21 +81,53 @@ func (r *reader) Start(done <-chan struct{}) (<-chan Event, error) {
}
}
close(queueDone)
events := <-queueC
// Populate the caller's event channel with the previously received events
r.eventC = make(chan Event, 1+len(events))
for _, ev := range events {
r.eventC <- *ev
}
go r.consumeEvents(done)
r.log.Infow("Started fsnotify watcher",
"file_path", r.config.Paths,
"recursive", r.config.Recursive)
return r.eventC, nil
}
// enqueueEvents drains the watcher until nextEvent signals termination
// (a nil event) and returns everything collected so far. It is used to
// buffer events that arrive while watches are still being installed.
func (r *reader) enqueueEvents(done <-chan struct{}) (events []*Event) {
	for ev := r.nextEvent(done); ev != nil; ev = r.nextEvent(done) {
		events = append(events, ev)
	}
	return events
}
func (r *reader) consumeEvents(done <-chan struct{}) {
defer close(r.eventC)
defer r.watcher.Close()
for {
select {
case <-done:
ev := r.nextEvent(done)
if ev == nil {
r.log.Debug("fsnotify reader terminated")
return
}
r.eventC <- *ev
}
}
func (r *reader) nextEvent(done <-chan struct{}) *Event {
for {
select {
case <-done:
return nil
case event := <-r.watcher.EventChannel():
if event.Name == "" || r.config.IsExcludedPath(event.Name) {
continue
@ -84,9 +141,14 @@ func (r *reader) consumeEvents(done <-chan struct{}) {
r.config.MaxFileSizeBytes, r.config.HashTypes)
e.rtt = time.Since(start)
r.eventC <- e
return &e
case err := <-r.watcher.ErrorChannel():
r.log.Warnw("fsnotify watcher error", "error", err)
// a bug in fsnotify can cause spurious nil errors to be sent
// on the error channel.
if err != nil {
r.log.Warnw("fsnotify watcher error", "error", err)
}
}
}
}

View File

@ -1,10 +1,29 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package file_integrity
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"syscall"
"testing"
"time"
@ -216,6 +235,74 @@ func TestEventReader(t *testing.T) {
})
}
// TestRaces verifies that no events are lost when files are created
// concurrently with reader.Start(), i.e. while the recursive watches are
// still being installed. Events generated during that window must be
// queued and later delivered on the reader's event channel.
func TestRaces(t *testing.T) {
	const (
		fileMode os.FileMode = 0640
		N                    = 100
	)
	// Create N temporary directories to watch. EvalSymlinks resolves
	// paths such as /var -> /private/var on macOS so that the paths
	// reported in events match the watched paths.
	var dirs []string
	for i := 0; i < N; i++ {
		dir, err := ioutil.TempDir("", "audit")
		if err != nil {
			t.Fatal(err)
		}
		if dir, err = filepath.EvalSymlinks(dir); err != nil {
			t.Fatal(err)
		}
		dirs = append(dirs, dir)
	}
	defer func() {
		for _, dir := range dirs {
			os.RemoveAll(dir)
		}
	}()
	// Create a new EventProducer.
	config := defaultConfig
	config.Paths = dirs
	config.Recursive = true
	r, err := NewEventReader(config)
	if err != nil {
		t.Fatal(err)
	}
	done := make(chan struct{})
	defer close(done)
	// Generate a lot of events in parallel to Start() so there is a chance of
	// events arriving before all watched dirs are Add()-ed
	go func() {
		for i := 0; i < 10; i++ {
			for _, dir := range dirs {
				fname := filepath.Join(dir, fmt.Sprintf("%d.dat", i))
				ioutil.WriteFile(fname, []byte("hello"), fileMode)
			}
		}
	}()
	eventC, err := r.Start(done)
	if err != nil {
		t.Fatal(err)
	}
	// Once Start has returned, all watches are installed; write one
	// marker file per directory. Each marker must produce an event.
	const marker = "test_file"
	for _, dir := range dirs {
		fname := filepath.Join(dir, marker)
		ioutil.WriteFile(fname, []byte("hello"), fileMode)
	}
	// Count only marker events (the background writer's *.dat events are
	// ignored); exactly one marker per directory is expected.
	got := 0
	for i := 0; got < N; i++ {
		ev := readTimeout(t, eventC)
		if strings.Contains(ev.Path, marker) {
			got++
		}
	}
	assert.Equal(t, N, got)
}
// readTimeout reads one event from the channel and returns it. If it does
// not receive an event after one second it will time-out and fail the test.
func readTimeout(t testing.TB, events <-chan Event) Event {

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// +build !linux,!freebsd,!openbsd,!netbsd,!windows,!darwin
package file_integrity

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// +build freebsd openbsd netbsd darwin
package file_integrity

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// +build linux
package file_integrity

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// +build linux freebsd openbsd netbsd darwin
package file_integrity

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package file_integrity
import (

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// +build windows
package file_integrity

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// +build darwin
package file_integrity

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// +build !darwin
package file_integrity

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// +build darwin
package file_integrity

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package file_integrity
import (
@ -95,6 +112,8 @@ func fbWriteHash(b *flatbuffers.Builder, hashes map[HashType]Digest) flatbuffers
schema.HashAddSha512224(b, offset)
case SHA512_256:
schema.HashAddSha512256(b, offset)
case XXH64:
schema.HashAddXx64(b, offset)
}
}
return schema.HashEnd(b)
@ -309,6 +328,9 @@ func fbDecodeHash(e *schema.Event) map[HashType]Digest {
case SHA512_256:
length = hash.Sha512256Length()
producer = hash.Sha512256
case XXH64:
length = hash.Xx64Length()
producer = hash.Xx64
default:
panic(errors.Errorf("unhandled hash type: %v", hashType))
}

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package file_integrity
import (

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package file_integrity
import (
@ -5,7 +22,7 @@ import (
"os"
"time"
"github.com/boltdb/bolt"
bolt "github.com/coreos/bbolt"
"github.com/pkg/errors"
"github.com/elastic/beats/auditbeat/datastore"

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package file_integrity
import (
@ -39,7 +56,7 @@ func TestData(t *testing.T) {
}
fullEvent := mbtest.StandardizeEvent(ms, events[len(events)-1], core.AddDatasetToEvent)
mbtest.WriteEventToDataJSON(t, fullEvent)
mbtest.WriteEventToDataJSON(t, fullEvent, "")
}
func TestDetectDeletedFiles(t *testing.T) {

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package monitor
import (

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// +build !integration
package monitor

View File

@ -1,9 +1,30 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package monitor
import (
"github.com/fsnotify/fsnotify"
)
const (
moduleName = "file_integrity"
)
// Watcher is an interface for a file watcher akin to fsnotify.Watcher
// with an additional Start method.
type Watcher interface {

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// +build !integration
package monitor
@ -56,6 +73,14 @@ func TestNonRecursive(t *testing.T) {
}
func TestRecursive(t *testing.T) {
if runtime.GOOS == "darwin" {
// This test races on Darwin because of internal races in the kqueue
// implementation of fsnotify when a watch is added in response to
// a subdirectory created inside a watched directory.
// This race doesn't affect auditbeat because the file_integrity module
// under Darwin uses fsevents instead of kqueue.
t.Skip("Disabled on Darwin")
}
dir, err := ioutil.TempDir("", "monitor")
assertNoError(t, err)
// under macOS, temp dir has a symlink in the path (/var -> /private/var)
@ -181,13 +206,14 @@ func TestRecursiveSubdirPermissions(t *testing.T) {
assertNoError(t, os.Chmod(filepath.Join(outDir, "b"), 0))
// Setup watched on watched dir
// Setup watches on watched dir
watcher, err := New(true)
assertNoError(t, err)
assertNoError(t, watcher.Add(dir))
assertNoError(t, watcher.Start())
assertNoError(t, watcher.Add(dir))
defer func() {
assertNoError(t, watcher.Close())
}()
@ -268,17 +294,30 @@ func testDirOps(t *testing.T, dir string, watcher Watcher) {
assert.Equal(t, fpath, ev.Name)
assert.Equal(t, fsnotify.Write, ev.Op)
// Consume all leftover writes to fpath
for err == nil && ev.Name == fpath && ev.Op == fsnotify.Write {
ev, err = readTimeout(t, watcher)
}
// Helper to read events ignoring a write to the parent dir, which seems
// to trigger sometimes under Windows when moving files around in a dir.
readIgnoreParent := func(t *testing.T, w Watcher) (fsnotify.Event, error) {
for {
ev, err := readTimeout(t, w)
if err != nil || ev.Name != dir || ev.Op != fsnotify.Write {
return ev, err
}
}
}
// Move
err = os.Rename(fpath, fpath2)
assertNoError(t, err)
evRename, err := readTimeout(t, watcher)
evRename, err := readIgnoreParent(t, watcher)
assertNoError(t, err)
// Sometimes a duplicate Write can be received under Linux, skip
if evRename.Op == fsnotify.Write {
evRename, err = readTimeout(t, watcher)
}
evCreate, err := readTimeout(t, watcher)
evCreate, err := readIgnoreParent(t, watcher)
assertNoError(t, err)
if evRename.Op != fsnotify.Rename {
@ -295,14 +334,9 @@ func testDirOps(t *testing.T, dir string, watcher Watcher) {
err = os.Remove(fpath2)
assertNoError(t, err)
ev, err = readTimeout(t, watcher)
ev, err = readIgnoreParent(t, watcher)
assertNoError(t, err)
// Windows: A write to the parent directory sneaks in
if ev.Op == fsnotify.Write && ev.Name == dir {
ev, err = readTimeout(t, watcher)
assertNoError(t, err)
}
assert.Equal(t, fpath2, ev.Name)
assert.Equal(t, fsnotify.Remove, ev.Op)
}

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package monitor
import "github.com/fsnotify/fsnotify"

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package monitor
import (
@ -7,20 +24,28 @@ import (
"github.com/fsnotify/fsnotify"
"github.com/joeshaw/multierror"
"github.com/pkg/errors"
"github.com/elastic/beats/libbeat/logp"
)
type recursiveWatcher struct {
inner *fsnotify.Watcher
tree FileTree
eventC chan fsnotify.Event
done chan bool
inner *fsnotify.Watcher
tree FileTree
eventC chan fsnotify.Event
done chan bool
addC chan string
addErrC chan error
log *logp.Logger
}
func newRecursiveWatcher(inner *fsnotify.Watcher) *recursiveWatcher {
return &recursiveWatcher{
inner: inner,
tree: FileTree{},
eventC: make(chan fsnotify.Event, 1),
inner: inner,
tree: FileTree{},
eventC: make(chan fsnotify.Event, 1),
addC: make(chan string),
addErrC: make(chan error),
log: logp.NewLogger(moduleName),
}
}
@ -31,6 +56,10 @@ func (watcher *recursiveWatcher) Start() error {
}
func (watcher *recursiveWatcher) Add(path string) error {
if watcher.done != nil {
watcher.addC <- path
return <-watcher.addErrC
}
return watcher.addRecursive(path)
}
@ -54,11 +83,16 @@ func (watcher *recursiveWatcher) ErrorChannel() <-chan error {
func (watcher *recursiveWatcher) addRecursive(path string) error {
var errs multierror.Errors
err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
if err != nil {
errs = append(errs, errors.Wrapf(watcher.inner.Add(path), "recursion into dir '%s' failed", path))
return nil
err := filepath.Walk(path, func(path string, info os.FileInfo, fnErr error) error {
if fnErr != nil {
errs = append(errs, errors.Wrapf(fnErr, "error walking path '%s'", path))
// If FileInfo is not nil, the directory entry can be processed
// even if there was some error
if info == nil {
return nil
}
}
var err error
if info.IsDir() {
if err = watcher.tree.AddDir(path); err == nil {
if err = watcher.inner.Add(path); err != nil {
@ -71,6 +105,8 @@ func (watcher *recursiveWatcher) addRecursive(path string) error {
}
return err
})
watcher.log.Debugw("Added recursive watch", "path", path)
if err != nil {
errs = append(errs, errors.Wrapf(err, "failed to walk path '%s'", path))
}
@ -82,6 +118,21 @@ func (watcher *recursiveWatcher) close() error {
return watcher.inner.Close()
}
// deliver sends ev to the watcher's event channel. While the consumer is
// not ready to receive, it keeps servicing Add requests arriving on addC
// (answering on addErrC) so a goroutine blocked in Add cannot deadlock
// against a consumer blocked on eventC. Returns once the event is
// delivered or the watcher is shut down.
func (watcher *recursiveWatcher) deliver(ev fsnotify.Event) {
for {
select {
// Watcher is shutting down: drop the event.
case <-watcher.done:
return
// Service a concurrent Add(path) request, then retry delivery.
case path := <-watcher.addC:
watcher.addErrC <- watcher.addRecursive(path)
// Consumer accepted the event.
case watcher.eventC <- ev:
return
}
}
}
func (watcher *recursiveWatcher) forwardEvents() error {
defer watcher.close()
@ -90,6 +141,9 @@ func (watcher *recursiveWatcher) forwardEvents() error {
case <-watcher.done:
return nil
case path := <-watcher.addC:
watcher.addErrC <- watcher.addRecursive(path)
case event, ok := <-watcher.inner.Events:
if !ok {
return nil
@ -99,37 +153,51 @@ func (watcher *recursiveWatcher) forwardEvents() error {
}
switch event.Op {
case fsnotify.Create:
if err := watcher.addRecursive(event.Name); err != nil {
watcher.inner.Errors <- errors.Wrapf(err, "unable to recurse path '%s'", event.Name)
err := watcher.addRecursive(event.Name)
if err != nil {
watcher.inner.Errors <- errors.Wrapf(err, "failed to add created path '%s'", event.Name)
}
watcher.tree.Visit(event.Name, PreOrder, func(path string, _ bool) error {
watcher.eventC <- fsnotify.Event{
err = watcher.tree.Visit(event.Name, PreOrder, func(path string, _ bool) error {
watcher.deliver(fsnotify.Event{
Name: path,
Op: event.Op,
}
})
return nil
})
if err != nil {
watcher.inner.Errors <- errors.Wrapf(err, "failed to visit created path '%s'", event.Name)
}
case fsnotify.Remove:
watcher.tree.Visit(event.Name, PostOrder, func(path string, _ bool) error {
watcher.eventC <- fsnotify.Event{
err := watcher.tree.Visit(event.Name, PostOrder, func(path string, _ bool) error {
watcher.deliver(fsnotify.Event{
Name: path,
Op: event.Op,
}
})
return nil
})
watcher.tree.Remove(event.Name)
if err != nil {
watcher.inner.Errors <- errors.Wrapf(err, "failed to visit removed path '%s'", event.Name)
}
err = watcher.tree.Remove(event.Name)
if err != nil {
watcher.inner.Errors <- errors.Wrapf(err, "failed to visit removed path '%s'", event.Name)
}
// Handling rename (move) as a special case to give this recursion
// the same semantics as macOS FSEvents:
// - Removal of a dir notifies removal for all files inside it
// - Moving a dir away sends only one notification for this dir
case fsnotify.Rename:
watcher.tree.Remove(event.Name)
err := watcher.tree.Remove(event.Name)
if err != nil {
watcher.inner.Errors <- errors.Wrapf(err, "failed to remove path '%s'", event.Name)
}
fallthrough
default:
watcher.eventC <- event
watcher.deliver(event)
}
}
}

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package file_integrity
import (

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package file_integrity
import (

View File

@ -57,6 +57,8 @@ table Hash {
blake2b_256: [byte];
blake2b_384: [byte];
blake2b_512: [byte];
xxh64: [byte];
}
table Event {

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// automatically generated by the FlatBuffers compiler, do not modify
package schema

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// automatically generated by the FlatBuffers compiler, do not modify
package schema

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// automatically generated by the FlatBuffers compiler, do not modify
package schema
@ -281,8 +298,25 @@ func (rcv *Hash) Blake2b512Length() int {
return 0
}
func (rcv *Hash) Xx64(j int) int8 {
o := flatbuffers.UOffsetT(rcv._tab.Offset(34))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.GetInt8(a + flatbuffers.UOffsetT(j*1))
}
return 0
}
func (rcv *Hash) Xx64Length() int {
o := flatbuffers.UOffsetT(rcv._tab.Offset(34))
if o != 0 {
return rcv._tab.VectorLen(o)
}
return 0
}
func HashStart(builder *flatbuffers.Builder) {
builder.StartObject(15)
builder.StartObject(16)
}
func HashAddMd5(builder *flatbuffers.Builder, md5 flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(md5), 0)
@ -374,6 +408,12 @@ func HashAddBlake2b512(builder *flatbuffers.Builder, blake2b512 flatbuffers.UOff
func HashStartBlake2b512Vector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(1, numElems, 1)
}
func HashAddXx64(builder *flatbuffers.Builder, xx64 flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(15, flatbuffers.UOffsetT(xx64), 0)
}
func HashStartXx64Vector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(1, numElems, 1)
}
func HashEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// automatically generated by the FlatBuffers compiler, do not modify
package schema

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// automatically generated by the FlatBuffers compiler, do not modify
package schema

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// automatically generated by the FlatBuffers compiler, do not modify
package schema

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package file_integrity
type SecurityDescriptor struct{}

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package file_integrity
import (

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
package file_integrity

View File

@ -1,3 +1,20 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package main
import (
@ -14,7 +31,7 @@ import (
"github.com/pkg/errors"
)
const defaultGlob = "module/*/_meta/config*.yml.tpl"
const defaultGlob = "module/*/_meta/config*.yml.tmpl"
var (
goos = flag.String("os", runtime.GOOS, "generate config specific to the specified operating system")
@ -35,26 +52,29 @@ func findConfigFiles(globs []string) ([]string, error) {
return configFiles, nil
}
// archBits reports the bit width of the given GOARCH architecture value.
// The auditd module configuration templates use it to generate
// architecture-specific audit rules (32-bit syscall tables for 386/arm,
// 64-bit for everything else).
func archBits(goarch string) int {
	if goarch == "386" || goarch == "arm" {
		return 32
	}
	return 64
}
func getConfig(file string) ([]byte, error) {
tpl, err := template.ParseFiles(file)
if err != nil {
return nil, errors.Wrapf(err, "failed reading %v", file)
}
var archBits string
switch *goarch {
case "i386":
archBits = "32"
case "amd64":
archBits = "64"
default:
return nil, fmt.Errorf("supporting only i386 and amd64 architecture")
}
data := map[string]interface{}{
"goarch": *goarch,
"goos": *goos,
"reference": *reference,
"arch_bits": archBits,
"GOARCH": *goarch,
"GOOS": *goos,
"Reference": *reference,
"ArchBits": archBits,
}
buf := new(bytes.Buffer)
if err = tpl.Execute(buf, data); err != nil {
@ -109,7 +129,7 @@ func main() {
if *concat {
segments = append(segments, segment)
} else {
output(segment, strings.TrimSuffix(file, ".tpl"))
output(segment, strings.TrimSuffix(file, ".tmpl"))
}
}

View File

@ -3,7 +3,7 @@ import sys
import os
import shutil
import unittest
from auditbeat import BaseTest
from auditbeat import *
from elasticsearch import Elasticsearch
from beat.beat import INTEGRATION_TESTS
@ -13,44 +13,47 @@ class Test(BaseTest):
"""
Auditbeat starts and stops without error.
"""
self.render_config_template(
modules=[{
"name": "file_integrity",
"extras": {
"paths": ["file.example"],
}
}],
)
proc = self.start_beat()
self.wait_until(lambda: self.log_contains("start running"))
proc.check_kill_and_wait()
self.assert_no_logged_warnings()
dirs = [self.temp_dir("auditbeat_test")]
with PathCleanup(dirs):
self.render_config_template(
modules=[{
"name": "file_integrity",
"extras": {
"paths": dirs,
}
}],
)
proc = self.start_beat()
self.wait_until(lambda: self.log_contains("start running"))
proc.check_kill_and_wait()
self.assert_no_logged_warnings()
# Ensure all Beater stages are used.
assert self.log_contains("Setup Beat: auditbeat")
assert self.log_contains("auditbeat start running")
assert self.log_contains("auditbeat stopped")
# Ensure all Beater stages are used.
assert self.log_contains("Setup Beat: auditbeat")
assert self.log_contains("auditbeat start running")
assert self.log_contains("auditbeat stopped")
@unittest.skipUnless(INTEGRATION_TESTS, "integration test")
def test_template(self):
"""
Test that the template can be loaded with `setup --template`
"""
es = Elasticsearch([self.get_elasticsearch_url()])
dirs = [self.temp_dir("auditbeat_test")]
with PathCleanup(dirs):
es = Elasticsearch([self.get_elasticsearch_url()])
self.render_config_template(
modules=[{
"name": "file_integrity",
"extras": {
"paths": ["file.example"],
}
}],
elasticsearch={"host": self.get_elasticsearch_url()})
exit_code = self.run_beat(extra_args=["setup", "--template"])
self.render_config_template(
modules=[{
"name": "file_integrity",
"extras": {
"paths": dirs,
}
}],
elasticsearch={"host": self.get_elasticsearch_url()})
self.run_beat(extra_args=["setup", "--template"], exit_code=0)
assert exit_code == 0
assert self.log_contains('Loaded index template')
assert len(es.cat.templates(name='auditbeat-*', h='name')) > 0
assert self.log_contains('Loaded index template')
assert len(es.cat.templates(name='auditbeat-*', h='name')) > 0
@unittest.skipUnless(INTEGRATION_TESTS, "integration test")
def test_dashboards(self):
@ -58,21 +61,22 @@ class Test(BaseTest):
Test that the dashboards can be loaded with `setup --dashboards`
"""
kibana_dir = os.path.join(self.beat_path, "_meta", "kibana")
shutil.copytree(kibana_dir, os.path.join(self.working_dir, "kibana"))
dirs = [self.temp_dir("auditbeat_test")]
with PathCleanup(dirs):
kibana_dir = os.path.join(self.beat_path, "_meta", "kibana.generated")
shutil.copytree(kibana_dir, os.path.join(self.working_dir, "kibana"))
es = Elasticsearch([self.get_elasticsearch_url()])
self.render_config_template(
modules=[{
"name": "file_integrity",
"extras": {
"paths": ["file.example"],
}
}],
elasticsearch={"host": self.get_elasticsearch_url()},
kibana={"host": self.get_kibana_url()},
)
exit_code = self.run_beat(extra_args=["setup", "--dashboards"])
es = Elasticsearch([self.get_elasticsearch_url()])
self.render_config_template(
modules=[{
"name": "file_integrity",
"extras": {
"paths": dirs,
}
}],
elasticsearch={"host": self.get_elasticsearch_url()},
kibana={"host": self.get_kibana_url()},
)
self.run_beat(extra_args=["setup", "--dashboards"], exit_code=0)
assert exit_code == 0
assert self.log_contains("Kibana dashboards successfully loaded.")
assert self.log_contains("Kibana dashboards successfully loaded.")

View File

@ -1,10 +1,5 @@
import sys
import os
import shutil
import time
import unittest
from auditbeat import *
from beat.beat import INTEGRATION_TESTS
# Escapes a path to match what's printed in the logs
@ -111,6 +106,8 @@ class Test(BaseTest):
self.wait_log_contains("\"deleted\"")
self.wait_log_contains("\"path\": \"{0}\"".format(escape_path(subdir)), ignore_case=True)
self.wait_output(3)
self.wait_until(lambda: any(
'file.path' in obj and obj['file.path'].lower() == subdir.lower() for obj in self.read_output()))
proc.check_kill_and_wait()
self.assert_no_logged_warnings()
@ -157,18 +154,24 @@ class Test(BaseTest):
self.wait_log_contains(escape_path(dirs[0]), max_timeout=30, ignore_case=True)
self.wait_log_contains("\"recursive\": true")
# auditbeat_test/subdir/
subdir = os.path.join(dirs[0], "subdir")
os.mkdir(subdir)
# auditbeat_test/subdir/file.txt
file1 = os.path.join(subdir, "file.txt")
self.create_file(file1, "hello world!")
# auditbeat_test/subdir/other/
subdir2 = os.path.join(subdir, "other")
os.mkdir(subdir2)
# auditbeat_test/subdir/other/more.txt
file2 = os.path.join(subdir2, "more.txt")
self.create_file(file2, "")
self.wait_log_contains("\"path\": \"{0}\"".format(escape_path(file2)), ignore_case=True)
self.wait_output(4)
self.wait_until(lambda: any(
'file.path' in obj and obj['file.path'].lower() == subdir2.lower() for obj in self.read_output()))
proc.check_kill_and_wait()
self.assert_no_logged_warnings()

View File

@ -0,0 +1,126 @@
import os
import platform
import sys
import tempfile
import unittest
from auditbeat import *
def is_root():
    """Return True when running with an effective UID of 0 (root).

    Platforms without ``os.geteuid`` (e.g. Windows) always report False.
    """
    geteuid = getattr(os, 'geteuid', None)
    if geteuid is None:
        return False
    euid = geteuid()
    print("euid is", euid)
    return euid == 0
def is_supported_linux():
    """Return True on Linux kernels newer than 3.10.

    Connecting to kauditd is not possible on kernel 3.10 or older, so the
    tests guarded by this check require Linux 3.11+.
    """
    parts = platform.platform().split('-')
    if parts[0] != 'Linux':
        return False
    kv = parts[1].split('.')
    # Tuple comparison: (major, minor) must be strictly greater than (3, 10).
    return (int(kv[0]), int(kv[1])) > (3, 10)
@unittest.skipUnless(is_supported_linux(), "Requires Linux 3.11+")
class Test(BaseTest):
    """System tests for the auditbeat `show` sub-commands.

    Fixes relative to the original: output files are read in text mode
    ('r' instead of 'rb') and `dict.items()` replaces `dict.iteritems()`,
    so the string comparisons and iteration work on Python 3 as well as
    Python 2.
    """

    def test_show_command(self):
        """
        show sub-command is present

        Runs auditbeat show --help. The process should terminate with
        a successful status if show is recognised.
        """
        self.run_beat(extra_args=['show', '--help'], exit_code=0)

    @unittest.skipUnless(is_root(), "Requires root")
    def test_show_auditd_rules(self):
        """
        show auditd-rules sub-command

        Set some rules and read them.
        """
        pid = os.getpid()
        # Keys are suffixed with our pid so these rules can be told apart
        # from any pre-existing audit rules on the host.
        rules = [
            '-w {0} -p w -k rule0_{1}'.format(os.path.realpath(__file__), pid),
            '-a always,exit -S mount -F pid={0} -F key=rule1_{0}'.format(pid),
        ]
        # YAML literal block ('|') so each rule lands on its own line in
        # the rendered config.
        rules_body = '|\n' + ''.join([' ' + rule + '\n' for rule in rules])
        self.render_config_template(
            modules=[{
                "name": "auditd",
                "extras": {
                    "audit_rules": rules_body
                }
            }]
        )
        proc = self.start_beat(extra_args=['-strict.perms=false'])
        # auditbeat adds an extra rule to ignore itself
        self.wait_log_contains('Successfully added {0} of {0} audit rules.'.format(len(rules) + 1),
                               max_timeout=30)
        proc.kill()

        fd, output_file = tempfile.mkstemp()
        self.run_beat(extra_args=['show', 'auditd-rules'],
                      exit_code=0,
                      output=output_file)
        # Text mode ('r', not 'rb') so readlines() yields str, matching the
        # str literals compared below on Python 3.
        fhandle = os.fdopen(fd, 'r')
        lines = fhandle.readlines()
        fhandle.close()
        os.unlink(output_file)

        assert len(lines) >= len(rules)
        # get rid of automatic rule (the self-ignore rule has no key=rule
        # marker, so drop the first line when it isn't one of ours)
        if '-F key=rule' not in lines[0]:
            del lines[0]
        for i in range(len(rules)):
            expected = rules[i]
            got = lines[i].strip()
            assert expected == got, \
                "rule {0} doesn't match. expected='{1}' got='{2}'".format(
                    i, expected, got
                )

    @unittest.skipUnless(is_root(), "Requires root")
    def test_show_auditd_status(self):
        """
        show auditd-status sub-command
        """
        # Fields the status report must contain: each exactly once, each
        # with a non-negative integer value.
        expected = [
            'enabled',
            'failure',
            'pid',
            'rate_limit',
            'backlog_limit',
            'lost',
            'backlog',
            'backlog_wait_time',
            'features',
        ]
        fields = dict((f, False) for f in expected)

        fd, output_file = tempfile.mkstemp()
        self.run_beat(extra_args=['show', 'auditd-status'],
                      exit_code=0,
                      output=output_file)
        # Text mode so the "PASS\n" comparison and str keys work on Python 3.
        fhandle = os.fdopen(fd, 'r')
        lines = fhandle.readlines()
        fhandle.close()
        os.unlink(output_file)

        for line in lines:
            if line == "PASS\n":
                break
            k, v = line.strip().split()
            assert k in fields, "Unexpected field '{0}'".format(k)
            assert not fields[k], "Field '{0}' repeated".format(k)
            # base 0 lets int() accept hex/octal-prefixed values as well.
            n = int(v, 0)
            assert n >= 0, "Field '{0}' has negative value {1}".format(k, v)
            fields[k] = True

        # items() instead of Python-2-only iteritems().
        for (k, v) in fields.items():
            assert v, "Field {0} not found".format(k)

View File

@ -0,0 +1,21 @@
# Auditbeat configuration: kernel audit rules plus file integrity
# monitoring, shipping events to Elasticsearch.
auditbeat.modules:
  - module: auditd
    audit_rules: |
      -w /etc/passwd -p wa -k identity
      -a always,exit -F arch=b32 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access

  - module: file_integrity
    paths:
      - /bin
      - /usr/bin
      - /sbin
      - /usr/sbin
      - /etc

processors:
  - add_cloud_metadata: ~

output.elasticsearch:
  hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}'
  username: '${ELASTICSEARCH_USERNAME:}'
  password: '${ELASTICSEARCH_PASSWORD:}'

View File

@ -0,0 +1,17 @@
# Filebeat configuration: load module configs from modules.d and
# autodiscover Docker containers via hints.
filebeat.config:
  modules:
    path: '${path.config}/modules.d/*.yml'
    reload.enabled: false

filebeat.autodiscover:
  providers:
    - type: docker
      hints.enabled: true

processors:
  - add_cloud_metadata: ~

output.elasticsearch:
  hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}'
  username: '${ELASTICSEARCH_USERNAME:}'
  password: '${ELASTICSEARCH_PASSWORD:}'

View File

@ -0,0 +1,20 @@
# Heartbeat configuration: HTTP checks against Elasticsearch and Kibana
# plus ICMP pings, every 5 seconds.
heartbeat.monitors:
  - type: http
    schedule: '@every 5s'
    urls:
      - http://elasticsearch:9200
      - http://kibana:5601

  - type: icmp
    schedule: '@every 5s'
    hosts:
      - elasticsearch
      - kibana

processors:
  - add_cloud_metadata: ~

output.elasticsearch:
  hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}'
  username: '${ELASTICSEARCH_USERNAME:}'
  password: '${ELASTICSEARCH_PASSWORD:}'

View File

@ -0,0 +1,33 @@
# Metricbeat configuration: load module configs from modules.d,
# autodiscover Docker containers via hints, and collect Docker
# engine metrics from the local socket.
metricbeat.config:
  modules:
    path: '${path.config}/modules.d/*.yml'
    # Reload module configs as they change:
    reload.enabled: false

metricbeat.autodiscover:
  providers:
    - type: docker
      hints.enabled: true

metricbeat.modules:
  - module: docker
    metricsets:
      - "container"
      - "cpu"
      - "diskio"
      - "healthcheck"
      - "info"
      #- "image"
      - "memory"
      - "network"
    hosts: ["unix:///var/run/docker.sock"]
    period: 10s
    enabled: true

processors:
  - add_cloud_metadata: ~

output.elasticsearch:
  hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}'
  username: '${ELASTICSEARCH_USERNAME:}'
  password: '${ELASTICSEARCH_PASSWORD:}'

Some files were not shown because too many files have changed in this diff Show More