mirror of
https://github.com/Icinga/icingabeat.git
synced 2025-04-08 17:15:05 +02:00
Compare commits
74 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
d667e10439 | ||
|
9babdbcf8c | ||
|
d1c5f06575 | ||
|
6c3bb8f98c | ||
|
3963d39045 | ||
|
518917ddcd | ||
|
e5f2689b47 | ||
|
feba0a3b32 | ||
|
c2b9c59e79 | ||
|
9f7358a956 | ||
|
cc46e4977c | ||
|
7125c03380 | ||
|
158b0d684e | ||
|
cbeed36768 | ||
|
59103fa0a6 | ||
|
6a77caeb37 | ||
|
67afdc97b1 | ||
|
f47c510aff | ||
|
9835f6fb2c | ||
|
2e15a262f2 | ||
|
cc8ca7dd1c | ||
|
f37bcf1fd9 | ||
|
1c6fc6ee26 | ||
|
63a71bf279 | ||
|
a7595c88f8 | ||
|
81be451ba5 | ||
|
ced805d846 | ||
|
9b37630ebc | ||
|
c9199033fd | ||
|
0724c3c4cd | ||
|
20d5e0fc77 | ||
|
bad1e4999c | ||
|
b1815bdc87 | ||
|
740e1f60a9 | ||
|
7ccff980ad | ||
|
1d0745bb25 | ||
|
9d6e58fa47 | ||
|
96abd765dd | ||
|
7357b0f489 | ||
|
97a5b11407 | ||
|
782f0ad3c8 | ||
|
7bbc24d3ab | ||
|
cd7d1e09f5 | ||
|
e7c3ba2472 | ||
|
d32627748e | ||
|
babf54a140 | ||
|
7b72dbc0bd | ||
|
9e1c436410 | ||
|
1443e86ffa | ||
|
243bed531e | ||
|
98c1a3a894 | ||
|
9f856fc765 | ||
|
62fcc54da6 | ||
|
b4155364f4 | ||
|
4ddca0bcff | ||
|
adb3ff1f6c | ||
|
eb697707f2 | ||
|
1a80e69145 | ||
|
6096673bf7 | ||
|
592affc521 | ||
|
6a1ac8ba5a | ||
|
1533eb14a5 | ||
|
5fa901bf8c | ||
|
91933c985e | ||
|
7459690a7f | ||
|
96393b8d09 | ||
|
b3f886912b | ||
|
26f561bb60 | ||
|
1269707447 | ||
|
250e248f98 | ||
|
24b40e8039 | ||
|
defa035fa8 | ||
|
d9d582d921 | ||
|
dd5751f335 |
@ -6,11 +6,10 @@ services:
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.9
|
||||
- "1.13"
|
||||
|
||||
os:
|
||||
- linux
|
||||
- osx
|
||||
|
||||
env:
|
||||
matrix:
|
||||
@ -32,6 +31,8 @@ before_install:
|
||||
- export TRAVIS_BUILD_DIR=$HOME/gopath/src/github.com/icinga/icingabeat/
|
||||
- cd $HOME/gopath/src/github.com/icinga/icingabeat/
|
||||
- go get github.com/Masterminds/glide
|
||||
- go get github.com/magefile/mage/mg
|
||||
- go get github.com/sirupsen/logrus
|
||||
|
||||
install:
|
||||
- true
|
||||
|
1
AUTHORS
1
AUTHORS
@ -1,3 +1,4 @@
|
||||
Alexander <35256191+lx183@users.noreply.github.com>
|
||||
Blerim Sheqa <blerim.sheqa@icinga.com>
|
||||
Dorian Lenzner <Dorian.Lenzner@telekom.de>
|
||||
Michael Friedrich <michael.friedrich@icinga.com>
|
||||
|
43
CHANGELOG.md
43
CHANGELOG.md
@ -1,4 +1,47 @@
|
||||
# Icingabeat CHANGELOG
|
||||
|
||||
## v7.17.4
|
||||
|
||||
### Features
|
||||
* Update libbeat to version 7.14.2
|
||||
|
||||
### Breaking Changes
|
||||
* Dashboards now must be imported manually using Kibana
|
||||
|
||||
## v7.14.2
|
||||
|
||||
### Features
|
||||
* Update libbeat to version 7.14.2
|
||||
|
||||
## v7.5.2
|
||||
|
||||
### Features
|
||||
* Update libbeat to version 7.5.2
|
||||
|
||||
## v7.4.2
|
||||
|
||||
### Features
|
||||
* Update libbeat to version 7.4.2
|
||||
|
||||
## v6.5.4
|
||||
|
||||
### Features
|
||||
* Update libbeat to version 6.5.4
|
||||
* Move all field names to 'icinga' namespace
|
||||
|
||||
### Bugs
|
||||
* Prevent usage of reserved keywords
|
||||
|
||||
## v6.3.3
|
||||
|
||||
### Features
|
||||
* Update libbeat to version 6.3.3
|
||||
|
||||
### Bugs
|
||||
* Remove `zones` key from statuspoller. This key may become too big to process.
|
||||
* Catch 404 return codes
|
||||
* Update dashboard directory schema so `icingabeat setup` works out of the box
|
||||
|
||||
## v6.1.1
|
||||
|
||||
### Features
|
||||
|
55
Makefile
55
Makefile
@ -1,53 +1,22 @@
|
||||
BEAT_NAME=icingabeat
|
||||
BEAT_DIR=github.com/icinga/icingabeat
|
||||
BEAT_DESCRIPTION=Icingabeat ships Icinga 2 events and states to Elasticsearch or Logstash.
|
||||
BEAT_VENDOR=Icinga
|
||||
BEAT_DOC_URL=https://github.com/Icinga/icingabeat
|
||||
BEAT_PATH=github.com/icinga/icingabeat
|
||||
BEAT_DOC_URL?=https://icinga.com/docs/icingabeat
|
||||
BEAT_GOPATH=$(firstword $(subst :, ,${GOPATH}))
|
||||
SYSTEM_TESTS=false
|
||||
TEST_ENVIRONMENT=false
|
||||
ES_BEATS?=./vendor/github.com/elastic/beats
|
||||
GOPACKAGES=$(shell glide novendor)
|
||||
PREFIX?=.
|
||||
|
||||
#TARGETS="linux/amd64 linux/386 windows/amd64 windows/386 darwin/amd64"
|
||||
#PACKAGES=${BEATNAME}/deb ${BEATNAME}/rpm ${BEATNAME}/darwin ${BEATNAME}/win ${BEATNAME}/bin
|
||||
#SNAPSHOT=false
|
||||
ES_BEATS_IMPORT_PATH=github.com/elastic/beats/v7
|
||||
ES_BEATS?=$(shell go list -m -f '{{.Dir}}' ${ES_BEATS_IMPORT_PATH})
|
||||
LIBBEAT_MAKEFILE=$(ES_BEATS)/libbeat/scripts/Makefile
|
||||
GOPACKAGES=$(shell go list ${BEAT_PATH}/... | grep -v /tools)
|
||||
GOBUILD_FLAGS=-i -ldflags "-X ${ES_BEATS_IMPORT_PATH}/libbeat/version.buildTime=$(NOW) -X ${ES_BEATS_IMPORT_PATH}/libbeat/version.commit=$(COMMIT_ID)"
|
||||
MAGE_IMPORT_PATH=github.com/magefile/mage
|
||||
NO_COLLECT=true
|
||||
CHECK_HEADERS_DISABLED=true
|
||||
|
||||
# Path to the libbeat Makefile
|
||||
-include $(ES_BEATS)/libbeat/scripts/Makefile
|
||||
-include $(LIBBEAT_MAKEFILE)
|
||||
|
||||
# Initial beat setup
|
||||
.PHONY: setup
|
||||
setup: copy-vendor
|
||||
make update
|
||||
|
||||
# Copy beats into vendor directory
|
||||
.PHONY: copy-vendor
|
||||
copy-vendor:
|
||||
mkdir -p vendor/github.com/elastic/
|
||||
cp -R ${GOPATH}/src/github.com/elastic/beats vendor/github.com/elastic/
|
||||
rm -rf vendor/github.com/elastic/beats/.git
|
||||
mage vendorUpdate
|
||||
|
||||
.PHONY: git-init
|
||||
git-init:
|
||||
git init
|
||||
git add README.md CONTRIBUTING.md
|
||||
git commit -m "Initial commit"
|
||||
git add LICENSE
|
||||
git commit -m "Add the LICENSE"
|
||||
git add .gitignore
|
||||
git commit -m "Add git settings"
|
||||
git add .
|
||||
git reset -- .travis.yml
|
||||
git commit -m "Add icingabeat"
|
||||
git add .travis.yml
|
||||
git commit -m "Add Travis CI"
|
||||
|
||||
# This is called by the beats packer before building starts
|
||||
.PHONY: before-build
|
||||
before-build:
|
||||
|
||||
# Collects all dependencies and then calls update
|
||||
.PHONY: collect
|
||||
collect:
|
||||
|
1
NOTICE.txt
Normal file
1
NOTICE.txt
Normal file
@ -0,0 +1 @@
|
||||
This file only exists to make `make package` happy.
|
77
README.md
77
README.md
@ -25,7 +25,7 @@ for more information
|
||||
|
||||
#### Requirements
|
||||
|
||||
* [Golang](https://golang.org/dl/) 1.9
|
||||
* [Golang](https://golang.org/dl/) 1.16
|
||||
|
||||
#### Clone
|
||||
|
||||
@ -47,7 +47,7 @@ To build the binary for Icingabeat run the command below. This will generate a
|
||||
binary in the same directory with the name icingabeat.
|
||||
|
||||
```shell
|
||||
make
|
||||
mage build
|
||||
```
|
||||
|
||||
#### Run
|
||||
@ -57,49 +57,6 @@ To run Icingabeat with debugging output enabled, run:
|
||||
./icingabeat -c icingabeat.yml -e -d "*"
|
||||
```
|
||||
|
||||
#### Test
|
||||
|
||||
To test Icingabeat, run the following command:
|
||||
|
||||
```shell
|
||||
make testsuite
|
||||
```
|
||||
|
||||
alternatively:
|
||||
```shell
|
||||
make unit-tests
|
||||
make system-tests
|
||||
make integration-tests
|
||||
make coverage-report
|
||||
```
|
||||
|
||||
The test coverage is reported in the folder `./build/coverage/`
|
||||
|
||||
#### Update
|
||||
|
||||
Each beat has a template for the mapping in elasticsearch and a documentation
|
||||
for the fields which is automatically generated based on `etc/fields.yml`.
|
||||
To generate etc/icingabeat.template.json and etc/icingabeat.asciidoc
|
||||
|
||||
```shell
|
||||
make update
|
||||
```
|
||||
|
||||
#### Cleanup
|
||||
|
||||
To clean Icingabeat source code, run the following commands:
|
||||
|
||||
```shell
|
||||
make fmt
|
||||
make simplify
|
||||
```
|
||||
|
||||
To clean up the build directory and generated artifacts, run:
|
||||
|
||||
```shell
|
||||
make clean
|
||||
```
|
||||
|
||||
### Packaging
|
||||
|
||||
The beat frameworks provides tools to crosscompile and package your beat for
|
||||
@ -108,35 +65,9 @@ vendoring as described above. To build packages of your beat, run the following
|
||||
command:
|
||||
|
||||
```shell
|
||||
make package
|
||||
export PLATFORMS="linux/amd64 linux/386"
|
||||
mage package
|
||||
```
|
||||
|
||||
This will fetch and create all images required for the build process. The whole
|
||||
process can take several minutes to finish.
|
||||
|
||||
To disable snapshot packages or build specific packages, set the following
|
||||
environment variables:
|
||||
|
||||
```shell
|
||||
export SNAPSHOT=false
|
||||
export TARGETS="\"linux/amd64 linux/386\""
|
||||
export PACKAGES=icingabeat/deb
|
||||
make package
|
||||
```
|
||||
|
||||
#### Dashboards
|
||||
To be able to export dashboards with all their dependencies (visualizations and
|
||||
searches) you have to name the dashboard with an `icingabeat-` prefix.
|
||||
|
||||
Export dashboards:
|
||||
```shell
|
||||
export ES_URL=http://127.0.0.1:9200
|
||||
make export-dashboards
|
||||
```
|
||||
|
||||
After exporting, dashboards can be packaged:
|
||||
|
||||
```shell
|
||||
export SNAPSHOT=false
|
||||
make package-dashboards
|
||||
```
|
||||
|
12
RELEASE.md
12
RELEASE.md
@ -14,14 +14,7 @@ git commit -am "Update AUTHORS"
|
||||
## 2. Changelog
|
||||
Update [CHANGELOG.md] with all relevant information.
|
||||
|
||||
## 3. Version
|
||||
Version numbers are incremented regarding the [SemVer 1.0.0] specification.
|
||||
Update the version number in the following files:
|
||||
|
||||
* `version.yml`
|
||||
* `vendor/github.com/elastic/beats/dev-tools/packer/version.yml`
|
||||
|
||||
## 4. Build
|
||||
## 3. Build
|
||||
Build packages:
|
||||
|
||||
``` bash
|
||||
@ -36,7 +29,7 @@ export SNAPSHOT=false
|
||||
make package-dashboards
|
||||
```
|
||||
|
||||
## 5. Git Tag
|
||||
## 4. Git Tag
|
||||
Commit all changes to the `master` branch
|
||||
|
||||
``` bash
|
||||
@ -56,7 +49,6 @@ Push tags
|
||||
git push --tags
|
||||
```
|
||||
|
||||
[SemVer 1.0.0]: http://semver.org/spec/v1.0.0.html
|
||||
[CHANGELOG.md]: CHANGELOG.md
|
||||
[AUTHORS]: AUTHORS
|
||||
[.mailmap]: .mailmap
|
||||
|
73
_meta/config/beat.yml.tmpl
Normal file
73
_meta/config/beat.yml.tmpl
Normal file
@ -0,0 +1,73 @@
|
||||
################### Icingabeat Configuration Example #########################
|
||||
|
||||
############################# Icingabeat ######################################
|
||||
|
||||
icingabeat:
|
||||
|
||||
# Defines the Icinga API endpoint
|
||||
host: "localhost"
|
||||
|
||||
# Defines the port of the API endpoint
|
||||
port: 5665
|
||||
|
||||
# A user with sufficient permissions
|
||||
user: "icinga"
|
||||
|
||||
# Password of the user
|
||||
password: "icinga"
|
||||
|
||||
# Configure SSL verification. If `false` is configured, all server hosts
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `true`.
|
||||
ssl.verify: true
|
||||
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
########################### Icingabeat Eventstream ##########################
|
||||
#
|
||||
# Icingabeat supports capturing of an eventstream and periodical polling of the
|
||||
# Icinga status data.
|
||||
|
||||
# Decide which events to receive from the event stream.
|
||||
# The following event stream types are available:
|
||||
#
|
||||
# * CheckResult
|
||||
# * StateChange
|
||||
# * Notification
|
||||
# * AcknowledgementSet
|
||||
# * AcknowledgementCleared
|
||||
# * CommentAdded
|
||||
# * CommentRemoved
|
||||
# * DowntimeAdded
|
||||
# * DowntimeRemoved
|
||||
# * DowntimeStarted
|
||||
# * DowntimeTriggered
|
||||
#
|
||||
# To disable eventstream, leave the types empty or comment out the option
|
||||
eventstream.types:
|
||||
- CheckResult
|
||||
- StateChange
|
||||
|
||||
# Event streams can be filtered by attributes using the prefix 'event.'
|
||||
#
|
||||
# Example for the CheckResult type with the exit_code set to 2:
|
||||
# filter: "event.check_result.exit_status==2"
|
||||
#
|
||||
# Example for the CheckResult type with the service matching the string
|
||||
# pattern "mysql*":
|
||||
# filter: 'match("mysql*", event.service)'
|
||||
#
|
||||
# To disable filtering set an empty string or comment out the filter option
|
||||
eventstream.filter: ""
|
||||
|
||||
# Defines how fast to reconnect to the API on connection loss
|
||||
eventstream.retry_interval: 10s
|
||||
|
||||
########################### Icingabeat Statuspoller #########################
|
||||
#
|
||||
# Icingabeat can collect status information about Icinga 2 periodically. Set
|
||||
# an interval at which the status API should be called. Set to 0 to disable
|
||||
# polling.
|
||||
statuspoller.interval: 60s
|
@ -1,687 +0,0 @@
|
||||
- key: icingabeat
|
||||
title: icingabeat
|
||||
description: Data received from the Icinga 2 API
|
||||
fields:
|
||||
- name: timestamp
|
||||
type: date
|
||||
description: >
|
||||
Timestamp of event occurrence
|
||||
|
||||
- name: type
|
||||
type: keyword
|
||||
description: >
|
||||
Type of the document
|
||||
|
||||
- name: host
|
||||
type: keyword
|
||||
description: >
|
||||
Host that triggered the event
|
||||
|
||||
- name: service
|
||||
type: keyword
|
||||
description: >
|
||||
Service that triggered the event
|
||||
|
||||
- name: state
|
||||
type: integer
|
||||
description: >
|
||||
State of the check
|
||||
|
||||
- name: state_type
|
||||
type: integer
|
||||
description: >
|
||||
State type of the check
|
||||
|
||||
- name: author
|
||||
type: keyword
|
||||
description: >
|
||||
Author of a message
|
||||
|
||||
- name: notification_type
|
||||
type: keyword
|
||||
description: >
|
||||
Type of notification
|
||||
|
||||
- name: text
|
||||
type: text
|
||||
description: >
|
||||
Text of a message
|
||||
|
||||
- name: users
|
||||
type: keyword
|
||||
description: >
|
||||
Affected users of a notification
|
||||
|
||||
- name: acknowledgement_type
|
||||
type: integer
|
||||
description: >
|
||||
Type of an acknowledgement
|
||||
|
||||
- name: expiry
|
||||
type: date
|
||||
description: >
|
||||
Expiry of an acknowledgement
|
||||
|
||||
- name: notify
|
||||
type: keyword
|
||||
description: >
|
||||
If has been sent out
|
||||
|
||||
- name: check_result.active
|
||||
type: boolean
|
||||
description: >
|
||||
If check was active or passive
|
||||
|
||||
- name: check_result.check_source
|
||||
type: keyword
|
||||
description: >
|
||||
Icinga instance that scheduled the check
|
||||
|
||||
- name: check_result.command
|
||||
type: text
|
||||
description: >
|
||||
Command that was executed
|
||||
|
||||
- name: check_result.execution_end
|
||||
type: date
|
||||
description: >
|
||||
Time when execution of check ended
|
||||
|
||||
- name: check_result.execution_start
|
||||
type: date
|
||||
description: >
|
||||
Time when execution of check started
|
||||
|
||||
- name: check_result.exit_status
|
||||
type: integer
|
||||
description: >
|
||||
Exit status
|
||||
|
||||
- name: check_result.output
|
||||
type: text
|
||||
description: >
|
||||
Output of check
|
||||
|
||||
- name: check_result.performance_data
|
||||
type: text
|
||||
description: >
|
||||
Performance data in text format
|
||||
|
||||
- name: check_result.schedule_end
|
||||
type: date
|
||||
description: >
|
||||
Time when scheduling of the check ended
|
||||
|
||||
- name: check_result.schedule_start
|
||||
type: date
|
||||
description: >
|
||||
Time when check was scheduled
|
||||
|
||||
- name: check_result.state
|
||||
type: integer
|
||||
description: >
|
||||
State of the check
|
||||
|
||||
- name: check_result.type
|
||||
type: keyword
|
||||
description: >
|
||||
Type of this event
|
||||
|
||||
- name: check_result.vars_after.attempt
|
||||
type: integer
|
||||
description: >
|
||||
Check attempt after check execution
|
||||
|
||||
- name: check_result.vars_after.reachable
|
||||
type: boolean
|
||||
description: >
|
||||
Reachable state after check execution
|
||||
|
||||
- name: check_result.vars_after.state
|
||||
type: integer
|
||||
description: >
|
||||
State of the check after execution
|
||||
|
||||
- name: check_result.vars_after.state_type
|
||||
type: integer
|
||||
description: >
|
||||
State type after execution
|
||||
|
||||
- name: check_result.vars_before.attempt
|
||||
type: integer
|
||||
description: >
|
||||
Check attempt before check execution
|
||||
|
||||
- name: check_result.vars_before.reachable
|
||||
type: boolean
|
||||
description: >
|
||||
Reachable state before check execution
|
||||
|
||||
- name: check_result.vars_before.state
|
||||
type: integer
|
||||
description: >
|
||||
Check state before check execution
|
||||
|
||||
- name: check_result.vars_before.state_type
|
||||
type: integer
|
||||
description: >
|
||||
State type before check execution
|
||||
|
||||
- name: comment.__name
|
||||
type: text
|
||||
description: >
|
||||
Unique identifier of a comment
|
||||
|
||||
- name: comment.author
|
||||
type: keyword
|
||||
description: >
|
||||
Author of a comment
|
||||
|
||||
- name: comment.entry_time
|
||||
type: date
|
||||
description: >
|
||||
Entry time of a comment
|
||||
|
||||
- name: comment.entry_type
|
||||
type: integer
|
||||
description: >
|
||||
Entry type of a comment
|
||||
|
||||
- name: comment.expire_time
|
||||
type: date
|
||||
description: >
|
||||
Expire time of a comment
|
||||
|
||||
- name: comment.host_name
|
||||
type: keyword
|
||||
description: >
|
||||
Host name of a comment
|
||||
|
||||
- name: comment.legacy_id
|
||||
type: integer
|
||||
description: >
|
||||
Legacy ID of a comment
|
||||
|
||||
- name: comment.name
|
||||
type: keyword
|
||||
description: >
|
||||
Identifier of a comment
|
||||
|
||||
- name: comment.package
|
||||
type: keyword
|
||||
description: >
|
||||
Config package of a comment
|
||||
|
||||
- name: comment.service_name
|
||||
type: keyword
|
||||
description: >
|
||||
Service name of a comment
|
||||
|
||||
- name: comment.templates
|
||||
type: text
|
||||
description: >
|
||||
Templates used by a comment
|
||||
|
||||
- name: comment.text
|
||||
type: text
|
||||
description: >
|
||||
Text of a comment
|
||||
|
||||
- name: comment.type
|
||||
type: keyword
|
||||
description: >
|
||||
Comment type
|
||||
|
||||
- name: comment.version
|
||||
type: keyword
|
||||
description: >
|
||||
Config version of comment object
|
||||
|
||||
- name: comment.zone
|
||||
type: keyword
|
||||
description: >
|
||||
Zone where comment was generated
|
||||
|
||||
- name: downtime.__name
|
||||
type: text
|
||||
description: >
|
||||
Unique identifier of a downtime
|
||||
|
||||
- name: downtime.author
|
||||
type: keyword
|
||||
description: >
|
||||
Author of a downtime
|
||||
|
||||
- name: downtime.comment
|
||||
type: text
|
||||
description: >
|
||||
Text of a downtime
|
||||
|
||||
- name: downtime.config_owner
|
||||
type: text
|
||||
description: >
|
||||
Config owner
|
||||
|
||||
- name: downtime.duration
|
||||
type: integer
|
||||
description: >
|
||||
Duration of a downtime
|
||||
|
||||
- name: downtime.end_time
|
||||
type: date
|
||||
description: >
|
||||
Timestamp of downtime end
|
||||
|
||||
- name: downtime.entry_time
|
||||
type: date
|
||||
description: >
|
||||
Timestamp when downtime was created
|
||||
|
||||
- name: downtime.fixed
|
||||
type: boolean
|
||||
description: >
|
||||
If downtime is fixed or flexible
|
||||
|
||||
- name: downtime.host_name
|
||||
type: keyword
|
||||
description: >
|
||||
Hostname of a downtime
|
||||
|
||||
- name: downtime.legacy_id
|
||||
type: integer
|
||||
description: >
|
||||
The integer ID of a downtime
|
||||
|
||||
- name: downtime.name
|
||||
type: keyword
|
||||
description: >
|
||||
Downtime config identifier
|
||||
|
||||
- name: downtime.package
|
||||
type: keyword
|
||||
description: >
|
||||
Configuration package of downtime
|
||||
|
||||
- name: downtime.scheduled_by
|
||||
type: text
|
||||
description: >
|
||||
By whom downtime was scheduled
|
||||
|
||||
- name: downtime.service_name
|
||||
type: keyword
|
||||
description: >
|
||||
Service name of a downtime
|
||||
|
||||
- name: downtime.start_time
|
||||
type: date
|
||||
description: >
|
||||
Timestamp when downtime starts
|
||||
|
||||
- name: downtime.templates
|
||||
type: text
|
||||
description: >
|
||||
Templates used by this downtime
|
||||
|
||||
- name: downtime.trigger_time
|
||||
type: date
|
||||
description: >
|
||||
Timestamp when downtime was triggered
|
||||
|
||||
- name: downtime.triggered_by
|
||||
type: text
|
||||
description: >
|
||||
By whom downtime was triggered
|
||||
|
||||
- name: downtime.triggers
|
||||
type: text
|
||||
description: >
|
||||
Downtime triggers
|
||||
|
||||
- name: downtime.type
|
||||
type: keyword
|
||||
description: >
|
||||
Downtime type
|
||||
|
||||
- name: downtime.version
|
||||
type: keyword
|
||||
description: >
|
||||
Config version of downtime
|
||||
|
||||
- name: downtime.was_cancelled
|
||||
type: boolean
|
||||
description: >
|
||||
If downtime was cancelled
|
||||
|
||||
- name: downtime.zone
|
||||
type: keyword
|
||||
description: >
|
||||
Zone of downtime
|
||||
|
||||
- name: status.active_host_checks
|
||||
type: integer
|
||||
description: >
|
||||
Active host checks
|
||||
|
||||
|
||||
- name: status.active_host_checks_15min
|
||||
type: integer
|
||||
description: >
|
||||
Active host checks in the last 15 minutes
|
||||
|
||||
|
||||
- name: status.active_host_checks_1min
|
||||
type: integer
|
||||
description: >
|
||||
Active host checks in the last minute
|
||||
|
||||
|
||||
- name: status.active_host_checks_5min
|
||||
type: integer
|
||||
description: >
|
||||
Active host checks in the last 5 minutes
|
||||
|
||||
|
||||
- name: status.active_service_checks
|
||||
type: integer
|
||||
description: >
|
||||
Active service checks
|
||||
|
||||
- name: status.active_service_checks_15min
|
||||
type: integer
|
||||
description: >
|
||||
Active service checks in the last 15 minutes
|
||||
|
||||
- name: status.active_service_checks_1min
|
||||
type: integer
|
||||
description: >
|
||||
Active service checks in the last minute
|
||||
|
||||
- name: status.active_service_checks_5min
|
||||
type: integer
|
||||
description: >
|
||||
Active service checks in the last 5 minutes
|
||||
|
||||
- name: status.api.identity
|
||||
type: keyword
|
||||
description: >
|
||||
API identity
|
||||
|
||||
- name: status.api.num_conn_endpoints
|
||||
type: integer
|
||||
description: >
|
||||
Number of connected endpoints
|
||||
|
||||
- name: status.api.num_endpoints
|
||||
type: integer
|
||||
description: >
|
||||
Total number of endpoints
|
||||
|
||||
- name: status.api.num_not_conn_endpoints
|
||||
type: integer
|
||||
description: >
|
||||
Number of not connected endpoints
|
||||
|
||||
- name: status.api.zones.demo.client_log_lag
|
||||
type: integer
|
||||
description: >
|
||||
Lag of the replaylog
|
||||
|
||||
- name: status.api.zones.demo.connected
|
||||
type: boolean
|
||||
description: >
|
||||
Zone connected
|
||||
|
||||
- name: status.api.zones.demo.endpoints
|
||||
type: text
|
||||
description: >
|
||||
Endpoint names
|
||||
|
||||
- name: status.api.zones.demo.parent_zone
|
||||
type: keyword
|
||||
description: >
|
||||
Parent zone
|
||||
|
||||
- name: status.avg_execution_time
|
||||
type: integer
|
||||
description: >
|
||||
Average execution time of checks
|
||||
|
||||
- name: status.avg_latency
|
||||
type: integer
|
||||
description: >
|
||||
Average latency time
|
||||
|
||||
- name: status.checkercomponent.checker.idle
|
||||
type: integer
|
||||
description: >
|
||||
Idle checks
|
||||
|
||||
- name: status.checkercomponent.checker.pending
|
||||
type: integer
|
||||
description: >
|
||||
Pending checks
|
||||
|
||||
- name: status.filelogger.main-log
|
||||
type: integer
|
||||
description: >
|
||||
Mainlog enabled
|
||||
|
||||
- name: status.icingaapplication.app.enable_event_handlers
|
||||
type: boolean
|
||||
description: >
|
||||
Event handlers enabled
|
||||
|
||||
- name: status.icingaapplication.app.enable_flapping
|
||||
type: boolean
|
||||
description: >
|
||||
Flapping detection enabled
|
||||
|
||||
- name: status.icingaapplication.app.enable_host_checks
|
||||
type: boolean
|
||||
description: >
|
||||
Host checks enabled
|
||||
|
||||
- name: status.icingaapplication.app.enable_notifications
|
||||
type: boolean
|
||||
description: >
|
||||
Notifications enabled
|
||||
|
||||
- name: status.icingaapplication.app.enable_perfdata
|
||||
type: boolean
|
||||
description: >
|
||||
Perfdata enabled
|
||||
|
||||
- name: status.icingaapplication.app.enable_service_checks
|
||||
type: boolean
|
||||
description: >
|
||||
Service checks enabled
|
||||
|
||||
- name: status.icingaapplication.app.node_name
|
||||
type: keyword
|
||||
description: >
|
||||
Node name
|
||||
|
||||
- name: status.icingaapplication.app.pid
|
||||
type: integer
|
||||
description: >
|
||||
PID
|
||||
|
||||
- name: status.icingaapplication.app.program_start
|
||||
type: integer
|
||||
description: >
|
||||
Time when Icinga started
|
||||
|
||||
- name: status.icingaapplication.app.version
|
||||
type: keyword
|
||||
description: >
|
||||
Version
|
||||
|
||||
- name: status.idomysqlconnection.ido-mysql.connected
|
||||
type: boolean
|
||||
description: >
|
||||
IDO connected
|
||||
|
||||
- name: status.idomysqlconnection.ido-mysql.instance_name
|
||||
type: keyword
|
||||
description: >
|
||||
IDO Instance name
|
||||
|
||||
- name: status.idomysqlconnection.ido-mysql.query_queue_items
|
||||
type: integer
|
||||
description: >
|
||||
IDO query items in the queue
|
||||
|
||||
- name: status.idomysqlconnection.ido-mysql.version
|
||||
type: keyword
|
||||
description: >
|
||||
IDO schema version
|
||||
|
||||
- name: status.max_execution_time
|
||||
type: integer
|
||||
description: >
|
||||
Max execution time
|
||||
|
||||
- name: status.max_latency
|
||||
type: integer
|
||||
description: >
|
||||
Max latency
|
||||
|
||||
- name: status.min_execution_time
|
||||
type: integer
|
||||
description: >
|
||||
Min execution time
|
||||
|
||||
- name: status.min_latency
|
||||
type: integer
|
||||
description: >
|
||||
Min latency
|
||||
|
||||
- name: status.notificationcomponent.notification
|
||||
type: integer
|
||||
description: >
|
||||
Notification
|
||||
|
||||
- name: status.num_hosts_acknowledged
|
||||
type: integer
|
||||
description: >
|
||||
Amount of acknowledged hosts
|
||||
|
||||
- name: status.num_hosts_down
|
||||
type: integer
|
||||
description: >
|
||||
Amount of down hosts
|
||||
|
||||
- name: status.num_hosts_flapping
|
||||
type: integer
|
||||
description: >
|
||||
Amount of flapping hosts
|
||||
|
||||
- name: status.num_hosts_in_downtime
|
||||
type: integer
|
||||
description: >
|
||||
Amount of hosts in downtime
|
||||
|
||||
- name: status.num_hosts_pending
|
||||
type: integer
|
||||
description: >
|
||||
Amount of pending hosts
|
||||
|
||||
- name: status.num_hosts_unreachable
|
||||
type: integer
|
||||
description: >
|
||||
Amount of unreachable hosts
|
||||
|
||||
- name: status.num_hosts_up
|
||||
type: integer
|
||||
description: >
|
||||
Amount of hosts in up state
|
||||
|
||||
- name: status.num_services_acknowledged
|
||||
type: integer
|
||||
description: >
|
||||
Amount of acknowledged services
|
||||
|
||||
- name: status.num_services_critical
|
||||
type: integer
|
||||
description: >
|
||||
Amount of critical services
|
||||
|
||||
- name: status.num_services_flapping
|
||||
type: integer
|
||||
description: >
|
||||
Amount of flapping services
|
||||
|
||||
- name: status.num_services_in_downtime
|
||||
type: integer
|
||||
description: >
|
||||
Amount of services in downtime
|
||||
|
||||
- name: status.num_services_ok
|
||||
type: integer
|
||||
description: >
|
||||
Amount of services in ok state
|
||||
|
||||
- name: status.num_services_pending
|
||||
type: integer
|
||||
description: >
|
||||
Amount of pending services
|
||||
|
||||
- name: status.num_services_unknown
|
||||
type: integer
|
||||
description: >
|
||||
Amount of unknown services
|
||||
|
||||
- name: status.num_services_unreachable
|
||||
type: integer
|
||||
description: >
|
||||
Amount of unreachable services
|
||||
|
||||
- name: status.num_services_warning
|
||||
type: integer
|
||||
description: >
|
||||
Amount of services in warning state
|
||||
|
||||
- name: status.passive_host_checks
|
||||
type: integer
|
||||
description: >
|
||||
Amount of passive host checks
|
||||
|
||||
- name: status.passive_host_checks_15min
|
||||
type: integer
|
||||
description: >
|
||||
Amount of passive host checks in the last 15 minutes
|
||||
|
||||
- name: status.passive_host_checks_1min
|
||||
type: integer
|
||||
description: >
|
||||
Amount of passive host checks in the last minute
|
||||
|
||||
- name: status.passive_host_checks_5min
|
||||
type: integer
|
||||
description: >
|
||||
Amount of passive host checks in the last 5 minutes
|
||||
|
||||
- name: status.passive_service_checks
|
||||
type: integer
|
||||
description: >
|
||||
Amount of passive service checks
|
||||
|
||||
- name: status.passive_service_checks_15min
|
||||
type: integer
|
||||
description: >
|
||||
Amount of passive service checks in the last 15 minutes
|
||||
|
||||
- name: status.passive_service_checks_1min
|
||||
type: integer
|
||||
description: >
|
||||
Amount of passive service checks in the last minute
|
||||
|
||||
- name: status.passive_service_checks_5min
|
||||
type: integer
|
||||
description: >
|
||||
Amount of passive service checks in the last 5 minutes
|
||||
|
||||
- name: status.uptime
|
||||
type: integer
|
||||
description: >
|
||||
Uptime
|
1348
_meta/fields.yml
1348
_meta/fields.yml
File diff suppressed because it is too large
Load Diff
File diff suppressed because one or more lines are too long
@ -1,13 +0,0 @@
|
||||
{
|
||||
"hits": 0,
|
||||
"timeRestore": false,
|
||||
"description": "",
|
||||
"title": "icingabeat-checkresults",
|
||||
"uiStateJSON": "{}",
|
||||
"panelsJSON": "[{\"size_x\":12,\"size_y\":3,\"panelIndex\":1,\"type\":\"visualization\",\"id\":\"9631be10-0977-11e7-a4dd-e96fa284b426\",\"col\":1,\"row\":1},{\"size_x\":3,\"size_y\":6,\"panelIndex\":2,\"type\":\"visualization\",\"id\":\"d50bb810-0978-11e7-a4dd-e96fa284b426\",\"col\":1,\"row\":4},{\"size_x\":4,\"size_y\":6,\"panelIndex\":3,\"type\":\"visualization\",\"id\":\"df437df0-0977-11e7-a4dd-e96fa284b426\",\"col\":4,\"row\":4},{\"size_x\":5,\"size_y\":6,\"panelIndex\":4,\"type\":\"visualization\",\"id\":\"cf643aa0-0977-11e7-a4dd-e96fa284b426\",\"col\":8,\"row\":4}]",
|
||||
"optionsJSON": "{\"darkTheme\":false}",
|
||||
"version": 1,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}"
|
||||
}
|
||||
}
|
@ -1,13 +0,0 @@
|
||||
{
|
||||
"hits": 0,
|
||||
"timeRestore": false,
|
||||
"description": "",
|
||||
"title": "icingabeat-notifications",
|
||||
"uiStateJSON": "{}",
|
||||
"panelsJSON": "[{\"size_x\":12,\"size_y\":2,\"panelIndex\":1,\"type\":\"visualization\",\"id\":\"face9fb0-0b1a-11e7-ad60-c7e10cd34b29\",\"col\":1,\"row\":1},{\"size_x\":3,\"size_y\":2,\"panelIndex\":2,\"type\":\"visualization\",\"id\":\"e3813340-0b1a-11e7-ad60-c7e10cd34b29\",\"col\":1,\"row\":3},{\"size_x\":3,\"size_y\":2,\"panelIndex\":3,\"type\":\"visualization\",\"id\":\"cf7c1400-0b1a-11e7-ad60-c7e10cd34b29\",\"col\":4,\"row\":3},{\"size_x\":3,\"size_y\":2,\"panelIndex\":4,\"type\":\"visualization\",\"id\":\"d8a29b80-0b1a-11e7-ad60-c7e10cd34b29\",\"col\":7,\"row\":3},{\"size_x\":3,\"size_y\":2,\"panelIndex\":5,\"type\":\"visualization\",\"id\":\"6a6a66b0-0b1b-11e7-ad60-c7e10cd34b29\",\"col\":10,\"row\":3},{\"size_x\":12,\"size_y\":5,\"panelIndex\":6,\"type\":\"search\",\"id\":\"9b1ca350-0b1a-11e7-ad60-c7e10cd34b29\",\"col\":1,\"row\":5,\"columns\":[\"host\",\"service\",\"notification_type\",\"text\",\"users\"],\"sort\":[\"@timestamp\",\"desc\"]}]",
|
||||
"optionsJSON": "{\"darkTheme\":false}",
|
||||
"version": 1,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}}}]}"
|
||||
}
|
||||
}
|
@ -1,13 +0,0 @@
|
||||
{
|
||||
"hits": 0,
|
||||
"timeRestore": false,
|
||||
"description": "",
|
||||
"title": "icingabeat-status",
|
||||
"uiStateJSON": "{}",
|
||||
"panelsJSON": "[{\"col\":1,\"id\":\"bef57c00-098c-11e7-85fa-5daf7284b188\",\"panelIndex\":1,\"row\":1,\"size_x\":4,\"size_y\":2,\"type\":\"visualization\"},{\"col\":9,\"id\":\"1b2324e0-0993-11e7-a836-f12bdc7df120\",\"panelIndex\":5,\"row\":3,\"size_x\":4,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"47061440-0994-11e7-828e-2b8b7d3da4e9\",\"panelIndex\":6,\"row\":3,\"size_x\":4,\"size_y\":2,\"type\":\"visualization\"},{\"col\":5,\"id\":\"7bf45770-0994-11e7-862d-53e4526068ac\",\"panelIndex\":7,\"row\":3,\"size_x\":4,\"size_y\":2,\"type\":\"visualization\"},{\"col\":9,\"id\":\"bdd29b50-0996-11e7-862d-53e4526068ac\",\"panelIndex\":8,\"row\":5,\"size_x\":4,\"size_y\":2,\"type\":\"visualization\"},{\"col\":5,\"id\":\"dfb21700-0a63-11e7-a96b-35f342c9d63d\",\"panelIndex\":9,\"row\":5,\"size_x\":4,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"4326e7c0-0a64-11e7-a96b-35f342c9d63d\",\"panelIndex\":10,\"row\":5,\"size_x\":4,\"size_y\":2,\"type\":\"visualization\"},{\"col\":5,\"id\":\"45556270-0b12-11e7-ad60-c7e10cd34b29\",\"panelIndex\":11,\"row\":1,\"size_x\":2,\"size_y\":2,\"type\":\"visualization\"},{\"col\":7,\"id\":\"e7912c50-0b11-11e7-ad60-c7e10cd34b29\",\"panelIndex\":12,\"row\":1,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":10,\"id\":\"27ed7600-0b12-11e7-ad60-c7e10cd34b29\",\"panelIndex\":13,\"row\":1,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"}]",
|
||||
"optionsJSON": "{\"darkTheme\":false}",
|
||||
"version": 1,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}"
|
||||
}
|
||||
}
|
@ -1,114 +0,0 @@
|
||||
{
|
||||
"objects": [
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"
|
||||
},
|
||||
"savedSearchId": "eb7896b0-e4bd-11e7-b4d1-8383451ae5a4",
|
||||
"title": "CheckResults by State",
|
||||
"uiStateJSON": "{\"vis\":{\"colors\":{\"Ok\":\"#629E51\",\"Warning\":\"#E5AC0E\",\"Critical\":\"#BF1B00\",\"Unknown\":\"#962D82\"}}}",
|
||||
"version": 1,
|
||||
"visState": "{\"title\":\"CheckResults by State\",\"type\":\"histogram\",\"params\":{\"type\":\"histogram\",\"grid\":{\"categoryLines\":false,\"style\":{\"color\":\"#eee\"}},\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"type\":\"category\",\"position\":\"bottom\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\"},\"labels\":{\"show\":true,\"truncate\":100},\"title\":{}}],\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"name\":\"LeftAxis-1\",\"type\":\"value\",\"position\":\"left\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\",\"mode\":\"normal\"},\"labels\":{\"show\":true,\"rotate\":0,\"filter\":false,\"truncate\":100},\"title\":{\"text\":\"Count\"}}],\"seriesParams\":[{\"show\":\"true\",\"type\":\"histogram\",\"mode\":\"stacked\",\"data\":{\"label\":\"Count\",\"id\":\"1\"},\"valueAxis\":\"ValueAxis-1\",\"drawLinesBetweenPoints\":true,\"showCircles\":true}],\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"times\":[],\"addTimeMarker\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"enabled\":true,\"type\":\"filters\",\"schema\":\"group\",\"params\":{\"filters\":[{\"input\":{\"query\":\"check_result.state:0\"},\"label\":\"Ok\"},{\"input\":{\"query\":\"check_result.state:1\"},\"label\":\"Warning\"},{\"input\":{\"query\":\"check_result.state:3\"},\"label\":\"Critical\"},{\"input\":{\"query\":\"check_result.state:4\"},\"label\":\"Unknown\"}]}}]}"
|
||||
},
|
||||
"id": "a32bdf10-e4be-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:40:36.094Z",
|
||||
"version": 1
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"
|
||||
},
|
||||
"savedSearchId": "eb7896b0-e4bd-11e7-b4d1-8383451ae5a4",
|
||||
"title": "CheckResult Count",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"visState": "{\"title\":\"CheckResult Count\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"metric\",\"metric\":{\"percentageMode\":false,\"useRanges\":false,\"colorSchema\":\"Green to Red\",\"metricColorMode\":\"None\",\"colorsRange\":[{\"from\":0,\"to\":10000}],\"labels\":{\"show\":true},\"invertColors\":false,\"style\":{\"bgFill\":\"#000\",\"bgColor\":false,\"labelColor\":false,\"subText\":\"\",\"fontSize\":60}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"CheckResults received\"}}]}"
|
||||
},
|
||||
"id": "3bf26530-e4be-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:40:36.094Z",
|
||||
"version": 1
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"
|
||||
},
|
||||
"savedSearchId": "eb7896b0-e4bd-11e7-b4d1-8383451ae5a4",
|
||||
"title": "Hosts Tag Cloud",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"visState": "{\"title\":\"Hosts Tag Cloud\",\"type\":\"tagcloud\",\"params\":{\"scale\":\"linear\",\"orientation\":\"single\",\"minFontSize\":18,\"maxFontSize\":72},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"host\",\"size\":50,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Hosts\"}}]}"
|
||||
},
|
||||
"id": "4a9d5c50-e4c0-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:40:36.094Z",
|
||||
"version": 1
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"
|
||||
},
|
||||
"savedSearchId": "eb7896b0-e4bd-11e7-b4d1-8383451ae5a4",
|
||||
"title": "Services Tag Cloud",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"visState": "{\"title\":\"Services Tag Cloud\",\"type\":\"tagcloud\",\"params\":{\"scale\":\"linear\",\"orientation\":\"single\",\"minFontSize\":18,\"maxFontSize\":72},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"service\",\"size\":500,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Services\"}}]}"
|
||||
},
|
||||
"id": "6a23e300-e4c0-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:40:36.094Z",
|
||||
"version": 1
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"columns": [
|
||||
"_source"
|
||||
],
|
||||
"description": "",
|
||||
"hits": 0,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\n \"index\": \"icingabeat-*\",\n \"highlightAll\": true,\n \"version\": true,\n \"query\": {\n \"language\": \"lucene\",\n \"query\": \"type:icingabeat.event.checkresult\"\n },\n \"filter\": []\n}"
|
||||
},
|
||||
"sort": [
|
||||
"@timestamp",
|
||||
"desc"
|
||||
],
|
||||
"title": "CheckResults",
|
||||
"version": 1
|
||||
},
|
||||
"id": "eb7896b0-e4bd-11e7-b4d1-8383451ae5a4",
|
||||
"type": "search",
|
||||
"updated_at": "2017-12-27T07:51:40.826Z",
|
||||
"version": 2
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "Summary of check results received by Icinga",
|
||||
"hits": 0,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"filter\":[],\"highlightAll\":true,\"version\":true}"
|
||||
},
|
||||
"optionsJSON": "{\"darkTheme\":false,\"useMargins\":true,\"hidePanelTitles\":false}",
|
||||
"panelsJSON": "[{\"panelIndex\":\"1\",\"gridData\":{\"x\":0,\"y\":0,\"w\":12,\"h\":2,\"i\":\"1\"},\"version\":\"6.1.0\",\"type\":\"visualization\",\"id\":\"a32bdf10-e4be-11e7-b4d1-8383451ae5a4\"},{\"panelIndex\":\"2\",\"gridData\":{\"x\":0,\"y\":2,\"w\":3,\"h\":5,\"i\":\"2\"},\"version\":\"6.1.0\",\"type\":\"visualization\",\"id\":\"3bf26530-e4be-11e7-b4d1-8383451ae5a4\"},{\"panelIndex\":\"3\",\"gridData\":{\"x\":3,\"y\":2,\"w\":4,\"h\":5,\"i\":\"3\"},\"version\":\"6.1.0\",\"type\":\"visualization\",\"id\":\"4a9d5c50-e4c0-11e7-b4d1-8383451ae5a4\"},{\"panelIndex\":\"4\",\"gridData\":{\"x\":7,\"y\":2,\"w\":5,\"h\":5,\"i\":\"4\"},\"version\":\"6.1.0\",\"type\":\"visualization\",\"id\":\"6a23e300-e4c0-11e7-b4d1-8383451ae5a4\"}]",
|
||||
"timeRestore": false,
|
||||
"title": "Icingabeat-CheckResults",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1
|
||||
},
|
||||
"id": "34e97340-e4ce-11e7-b4d1-8383451ae5a4",
|
||||
"type": "dashboard",
|
||||
"updated_at": "2017-12-27T07:40:36.094Z",
|
||||
"version": 1
|
||||
}
|
||||
],
|
||||
"version": "6.1.0"
|
||||
}
|
@ -1,134 +0,0 @@
|
||||
{
|
||||
"objects": [
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[],\"query\":{\"language\":\"lucene\",\"query\":\"\"}}"
|
||||
},
|
||||
"savedSearchId": "fa782860-e4bd-11e7-b4d1-8383451ae5a4",
|
||||
"title": "Notification Types",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"visState": "{\"title\":\"Notification Types\",\"type\":\"histogram\",\"params\":{\"addLegend\":true,\"addTimeMarker\":false,\"addTooltip\":true,\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"labels\":{\"show\":true,\"truncate\":100},\"position\":\"bottom\",\"scale\":{\"type\":\"linear\"},\"show\":true,\"style\":{},\"title\":{},\"type\":\"category\"}],\"grid\":{\"categoryLines\":false,\"style\":{\"color\":\"#eee\"}},\"legendPosition\":\"right\",\"seriesParams\":[{\"data\":{\"id\":\"1\",\"label\":\"Count\"},\"drawLinesBetweenPoints\":true,\"mode\":\"stacked\",\"show\":\"true\",\"showCircles\":true,\"type\":\"histogram\",\"valueAxis\":\"ValueAxis-1\"}],\"times\":[],\"type\":\"histogram\",\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"labels\":{\"filter\":false,\"rotate\":0,\"show\":true,\"truncate\":100},\"name\":\"LeftAxis-1\",\"position\":\"left\",\"scale\":{\"mode\":\"normal\",\"type\":\"linear\"},\"show\":true,\"style\":{},\"title\":{\"text\":\"Count\"},\"type\":\"value\"}]},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"notification_type\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}]}"
|
||||
},
|
||||
"id": "af54ac40-e4cd-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:40:37.107Z",
|
||||
"version": 1
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"
|
||||
},
|
||||
"savedSearchId": "fa782860-e4bd-11e7-b4d1-8383451ae5a4",
|
||||
"title": "Notification Types (Pie)",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"visState": "{\"title\":\"Notification Types (Pie)\",\"type\":\"pie\",\"params\":{\"type\":\"pie\",\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":true,\"labels\":{\"show\":false,\"values\":true,\"last_level\":true,\"truncate\":100}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"notification_type\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}]}"
|
||||
},
|
||||
"id": "caabba10-e4cd-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:40:37.107Z",
|
||||
"version": 1
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\n \"filter\": [],\n \"query\": {\n \"query\": \"\",\n \"language\": \"lucene\"\n }\n}"
|
||||
},
|
||||
"savedSearchId": "fa782860-e4bd-11e7-b4d1-8383451ae5a4",
|
||||
"title": "Notification Services",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"visState": "{\n \"title\": \"Notification Services\",\n \"type\": \"pie\",\n \"params\": {\n \"type\": \"pie\",\n \"addTooltip\": true,\n \"addLegend\": true,\n \"legendPosition\": \"right\",\n \"isDonut\": true,\n \"labels\": {\n \"show\": false,\n \"values\": true,\n \"last_level\": true,\n \"truncate\": 100\n }\n },\n \"aggs\": [\n {\n \"id\": \"1\",\n \"enabled\": true,\n \"type\": \"count\",\n \"schema\": \"metric\",\n \"params\": {}\n },\n {\n \"id\": \"2\",\n \"enabled\": true,\n \"type\": \"terms\",\n \"schema\": \"segment\",\n \"params\": {\n \"field\": \"service\",\n \"size\": 5,\n \"order\": \"desc\",\n \"orderBy\": \"1\"\n }\n }\n ]\n}"
|
||||
},
|
||||
"id": "fcb31150-e4ca-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:56:13.974Z",
|
||||
"version": 2
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"
|
||||
},
|
||||
"savedSearchId": "fa782860-e4bd-11e7-b4d1-8383451ae5a4",
|
||||
"title": "Notification Hosts",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"visState": "{\"title\":\"Notification Hosts\",\"type\":\"pie\",\"params\":{\"type\":\"pie\",\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":true,\"labels\":{\"show\":false,\"values\":true,\"last_level\":true,\"truncate\":100}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"host\",\"size\":500,\"order\":\"desc\",\"orderBy\":\"1\"}}]}"
|
||||
},
|
||||
"id": "e5a012a0-e4c6-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:56:02.651Z",
|
||||
"version": 3
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\n \"filter\": [],\n \"query\": {\n \"query\": \"\",\n \"language\": \"lucene\"\n }\n}"
|
||||
},
|
||||
"savedSearchId": "fa782860-e4bd-11e7-b4d1-8383451ae5a4",
|
||||
"title": "Notifications by User",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"visState": "{\n \"title\": \"Notifications by User\",\n \"type\": \"pie\",\n \"params\": {\n \"type\": \"pie\",\n \"addTooltip\": true,\n \"addLegend\": true,\n \"legendPosition\": \"right\",\n \"isDonut\": true,\n \"labels\": {\n \"show\": false,\n \"values\": true,\n \"last_level\": true,\n \"truncate\": 100\n }\n },\n \"aggs\": [\n {\n \"id\": \"1\",\n \"enabled\": true,\n \"type\": \"count\",\n \"schema\": \"metric\",\n \"params\": {}\n },\n {\n \"id\": \"2\",\n \"enabled\": true,\n \"type\": \"terms\",\n \"schema\": \"segment\",\n \"params\": {\n \"field\": \"users\",\n \"size\": 5,\n \"order\": \"desc\",\n \"orderBy\": \"1\"\n }\n }\n ]\n}"
|
||||
},
|
||||
"id": "e95ca140-e4cd-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:56:25.109Z",
|
||||
"version": 2
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"columns": [
|
||||
"host",
|
||||
"service",
|
||||
"users",
|
||||
"text"
|
||||
],
|
||||
"description": "",
|
||||
"hits": 0,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\n \"index\": \"icingabeat-*\",\n \"highlightAll\": true,\n \"version\": true,\n \"query\": {\n \"language\": \"lucene\",\n \"query\": \"type:icingabeat.event.notification\"\n },\n \"filter\": []\n}"
|
||||
},
|
||||
"sort": [
|
||||
"@timestamp",
|
||||
"desc"
|
||||
],
|
||||
"title": "Notifications",
|
||||
"version": 1
|
||||
},
|
||||
"id": "fa782860-e4bd-11e7-b4d1-8383451ae5a4",
|
||||
"type": "search",
|
||||
"updated_at": "2017-12-27T07:51:48.494Z",
|
||||
"version": 2
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "Summary of notifications received by Icinga",
|
||||
"hits": 0,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[],\"highlightAll\":true,\"version\":true}"
|
||||
},
|
||||
"optionsJSON": "{\"darkTheme\":false,\"hidePanelTitles\":false,\"useMargins\":true}",
|
||||
"panelsJSON": "[{\"panelIndex\":\"1\",\"gridData\":{\"x\":0,\"y\":0,\"w\":12,\"h\":2,\"i\":\"1\"},\"id\":\"af54ac40-e4cd-11e7-b4d1-8383451ae5a4\",\"type\":\"visualization\",\"version\":\"6.1.0\"},{\"panelIndex\":\"2\",\"gridData\":{\"x\":0,\"y\":2,\"w\":3,\"h\":2,\"i\":\"2\"},\"id\":\"caabba10-e4cd-11e7-b4d1-8383451ae5a4\",\"type\":\"visualization\",\"version\":\"6.1.0\"},{\"panelIndex\":\"3\",\"gridData\":{\"x\":3,\"y\":2,\"w\":3,\"h\":2,\"i\":\"3\"},\"id\":\"fcb31150-e4ca-11e7-b4d1-8383451ae5a4\",\"type\":\"visualization\",\"version\":\"6.1.0\"},{\"panelIndex\":\"4\",\"gridData\":{\"x\":6,\"y\":2,\"w\":3,\"h\":2,\"i\":\"4\"},\"id\":\"e5a012a0-e4c6-11e7-b4d1-8383451ae5a4\",\"type\":\"visualization\",\"version\":\"6.1.0\"},{\"panelIndex\":\"5\",\"gridData\":{\"x\":9,\"y\":2,\"w\":3,\"h\":2,\"i\":\"5\"},\"id\":\"e95ca140-e4cd-11e7-b4d1-8383451ae5a4\",\"type\":\"visualization\",\"version\":\"6.1.0\"},{\"panelIndex\":\"6\",\"gridData\":{\"x\":0,\"y\":4,\"w\":12,\"h\":11,\"i\":\"6\"},\"version\":\"6.1.0\",\"type\":\"search\",\"id\":\"fa782860-e4bd-11e7-b4d1-8383451ae5a4\"}]",
|
||||
"timeRestore": false,
|
||||
"title": "Icingabeat-Notifications",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1
|
||||
},
|
||||
"id": "ed031e90-e4ce-11e7-b4d1-8383451ae5a4",
|
||||
"type": "dashboard",
|
||||
"updated_at": "2017-12-27T07:40:37.107Z",
|
||||
"version": 1
|
||||
}
|
||||
],
|
||||
"version": "6.1.0"
|
||||
}
|
@ -1,209 +0,0 @@
|
||||
{
|
||||
"objects": [
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{}"
|
||||
},
|
||||
"title": "Icinga Logo",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"visState": "{\"title\":\"Icinga Logo\",\"type\":\"markdown\",\"params\":{\"fontSize\":12,\"markdown\":\"\"},\"aggs\":[]}"
|
||||
},
|
||||
"id": "77052890-e4c0-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:40:38.128Z",
|
||||
"version": 1
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"
|
||||
},
|
||||
"savedSearchId": "091fd610-e4be-11e7-b4d1-8383451ae5a4",
|
||||
"title": "Icinga Version",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"visState": "{\"title\":\"Icinga Version\",\"type\":\"pie\",\"params\":{\"type\":\"pie\",\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"top\",\"isDonut\":true,\"labels\":{\"show\":false,\"values\":true,\"last_level\":true,\"truncate\":100}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"status.icingaapplication.app.version\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"_term\"}}]}"
|
||||
},
|
||||
"id": "bebb81b0-e4c1-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:40:38.128Z",
|
||||
"version": 1
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"
|
||||
},
|
||||
"savedSearchId": "091fd610-e4be-11e7-b4d1-8383451ae5a4",
|
||||
"title": "MySQL Schema Version",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"visState": "{\"title\":\"MySQL Schema Version\",\"type\":\"pie\",\"params\":{\"type\":\"pie\",\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"top\",\"isDonut\":true,\"labels\":{\"show\":false,\"values\":true,\"last_level\":true,\"truncate\":100}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"status.idomysqlconnection.ido-mysql.version\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"_term\"}}]}"
|
||||
},
|
||||
"id": "73cd6b40-e4c2-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:40:38.128Z",
|
||||
"version": 1
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"
|
||||
},
|
||||
"savedSearchId": "091fd610-e4be-11e7-b4d1-8383451ae5a4",
|
||||
"title": "Nodes",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"visState": "{\"title\":\"Nodes\",\"type\":\"pie\",\"params\":{\"type\":\"pie\",\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"top\",\"isDonut\":true,\"labels\":{\"show\":false,\"values\":true,\"last_level\":true,\"truncate\":100}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"status.icingaapplication.app.node_name\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"_term\"}}]}"
|
||||
},
|
||||
"id": "b37471e0-e4c6-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:40:38.128Z",
|
||||
"version": 1
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{}"
|
||||
},
|
||||
"title": "Hostchecks by time",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"visState": "{\"title\":\"Hostchecks by time\",\"type\":\"timelion\",\"params\":{\"expression\":\".es(metric='avg:status.active_host_checks_1min').color(#070F4E).label(\\\"1 min\\\").title(\\\"Amount of Hostchecks\\\"),.es(metric='avg:status.active_host_checks_5min').color(#2772DB).label(\\\"5 min\\\"),.es(metric='avg:status.active_host_checks_15min').color(#3AB1C8).label(\\\"15 min\\\")\",\"interval\":\"1m\"},\"aggs\":[]}"
|
||||
},
|
||||
"id": "16cd5a60-e4c0-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:40:38.128Z",
|
||||
"version": 1
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{}"
|
||||
},
|
||||
"title": "Servicechecks by time",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"visState": "{\"title\":\"Servicechecks by time\",\"type\":\"timelion\",\"params\":{\"expression\":\".es(metric='avg:status.active_service_checks_1min').color(#070F4E).label(\\\"1 min\\\").title(\\\"Amount of Servicechecks\\\"),.es(metric='avg:status.active_service_checks_5min').color(#2772DB).label(\\\"5 min\\\"),.es(metric='avg:status.active_service_checks_15min').color(#3AB1C8).label(\\\"15 min\\\")\",\"interval\":\"1m\"},\"aggs\":[]}"
|
||||
},
|
||||
"id": "fbb4acc0-e4cd-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:40:38.128Z",
|
||||
"version": 1
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{}"
|
||||
},
|
||||
"title": "Endpoints comparisson",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"visState": "{\"title\":\"Endpoints comparisson\",\"type\":\"timelion\",\"params\":{\"expression\":\".es(metric='avg:status.api.num_endpoints').label(\\\"Endpoints\\\"), .es(metric='avg:status.api.num_not_conn_endpoints').label(\\\"Endpoints not connected\\\").title(\\\"Connected Endpoints\\\")\",\"interval\":\"1m\"},\"aggs\":[]}"
|
||||
},
|
||||
"id": "0c0685d0-e4bf-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:40:38.128Z",
|
||||
"version": 1
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{}"
|
||||
},
|
||||
"title": "States of Hosts",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"visState": "{\"title\":\"States of Hosts\",\"type\":\"timelion\",\"params\":{\"expression\":\".es(metric='avg:status.num_hosts_up').color(#3EC8AC).label(\\\"Up\\\").title(\\\"States of Hosts\\\"),.es(metric='avg:status.num_hosts_down').color(#E94822).label(\\\"Down\\\"),.es(metric='avg:status.num_hosts_unreachable').color(#6E60A0).label(\\\"Unreachable\\\")\",\"interval\":\"1m\"},\"aggs\":[]}"
|
||||
},
|
||||
"id": "0d44fb70-e4ce-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:40:38.128Z",
|
||||
"version": 1
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{}"
|
||||
},
|
||||
"title": "States of Services",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"visState": "{\"title\":\"States of Services\",\"type\":\"timelion\",\"params\":{\"expression\":\".es(metric='avg:status.num_services_ok').color(#3EC8AC).label(\\\"Ok\\\").title(\\\"States of Services\\\"),.es(metric='avg:status.num_services_warning').color(#F2910A).label(\\\"Warning\\\"),.es(metric='avg:status.num_services_critical').color(#E94822).label(\\\"Critical\\\")\",\"interval\":\"1m\"},\"aggs\":[]}"
|
||||
},
|
||||
"id": "204750b0-e4ce-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:40:38.128Z",
|
||||
"version": 1
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{}"
|
||||
},
|
||||
"title": "MySQL Queries",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"visState": "{\"title\":\"MySQL Queries\",\"type\":\"timelion\",\"params\":{\"expression\":\".es(metric='avg:perfdata.idomysqlconnection_ido-mysql_queries_1min.value').color(#616EEF).label(\\\"1 min\\\").title(\\\"MySQL Queries\\\"), .es(metric='avg:perfdata.idomysqlconnection_ido-mysql_queries_5mins.value').color(#09A8FA).label(\\\"5 min\\\"), .es(metric='avg:perfdata.idomysqlconnection_ido-mysql_queries_15mins.value').color(#41C5D3).label(\\\"15 min\\\")\",\"interval\":\"1m\"},\"aggs\":[]}"
|
||||
},
|
||||
"id": "4d4cda00-e4c2-11e7-b4d1-8383451ae5a4",
|
||||
"type": "visualization",
|
||||
"updated_at": "2017-12-27T07:40:38.128Z",
|
||||
"version": 1
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"columns": [
|
||||
"_source"
|
||||
],
|
||||
"description": "",
|
||||
"hits": 0,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\n \"index\": \"icingabeat-*\",\n \"highlightAll\": true,\n \"version\": true,\n \"query\": {\n \"language\": \"lucene\",\n \"query\": \"type:icingabeat.status*\"\n },\n \"filter\": []\n}"
|
||||
},
|
||||
"sort": [
|
||||
"@timestamp",
|
||||
"desc"
|
||||
],
|
||||
"title": "Statuspoller",
|
||||
"version": 1
|
||||
},
|
||||
"id": "091fd610-e4be-11e7-b4d1-8383451ae5a4",
|
||||
"type": "search",
|
||||
"updated_at": "2017-12-27T07:51:55.982Z",
|
||||
"version": 2
|
||||
},
|
||||
{
|
||||
"attributes": {
|
||||
"description": "Summary of Icinga Metrics",
|
||||
"hits": 0,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[],\"highlightAll\":true,\"version\":true}"
|
||||
},
|
||||
"optionsJSON": "{\"darkTheme\":false,\"hidePanelTitles\":false,\"useMargins\":true}",
|
||||
"panelsJSON": "[{\"gridData\":{\"h\":2,\"i\":\"1\",\"w\":3,\"x\":0,\"y\":0},\"id\":\"77052890-e4c0-11e7-b4d1-8383451ae5a4\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"6.1.0\"},{\"gridData\":{\"h\":2,\"i\":\"2\",\"w\":3,\"x\":3,\"y\":0},\"id\":\"bebb81b0-e4c1-11e7-b4d1-8383451ae5a4\",\"panelIndex\":\"2\",\"type\":\"visualization\",\"version\":\"6.1.0\"},{\"gridData\":{\"h\":2,\"i\":\"3\",\"w\":3,\"x\":6,\"y\":0},\"id\":\"73cd6b40-e4c2-11e7-b4d1-8383451ae5a4\",\"panelIndex\":\"3\",\"type\":\"visualization\",\"version\":\"6.1.0\"},{\"gridData\":{\"h\":2,\"i\":\"4\",\"w\":3,\"x\":9,\"y\":0},\"id\":\"b37471e0-e4c6-11e7-b4d1-8383451ae5a4\",\"panelIndex\":\"4\",\"type\":\"visualization\",\"version\":\"6.1.0\"},{\"gridData\":{\"h\":3,\"i\":\"5\",\"w\":4,\"x\":0,\"y\":2},\"id\":\"16cd5a60-e4c0-11e7-b4d1-8383451ae5a4\",\"panelIndex\":\"5\",\"type\":\"visualization\",\"version\":\"6.1.0\"},{\"gridData\":{\"h\":3,\"i\":\"6\",\"w\":4,\"x\":4,\"y\":2},\"id\":\"fbb4acc0-e4cd-11e7-b4d1-8383451ae5a4\",\"panelIndex\":\"6\",\"type\":\"visualization\",\"version\":\"6.1.0\"},{\"gridData\":{\"h\":3,\"i\":\"7\",\"w\":4,\"x\":8,\"y\":2},\"id\":\"0c0685d0-e4bf-11e7-b4d1-8383451ae5a4\",\"panelIndex\":\"7\",\"type\":\"visualization\",\"version\":\"6.1.0\"},{\"gridData\":{\"h\":3,\"i\":\"8\",\"w\":4,\"x\":0,\"y\":5},\"id\":\"0d44fb70-e4ce-11e7-b4d1-8383451ae5a4\",\"panelIndex\":\"8\",\"type\":\"visualization\",\"version\":\"6.1.0\"},{\"gridData\":{\"h\":3,\"i\":\"9\",\"w\":4,\"x\":4,\"y\":5},\"id\":\"204750b0-e4ce-11e7-b4d1-8383451ae5a4\",\"panelIndex\":\"9\",\"type\":\"visualization\",\"version\":\"6.1.0\"},{\"gridData\":{\"h\":3,\"i\":\"10\",\"w\":4,\"x\":8,\"y\":5},\"id\":\"4d4cda00-e4c2-11e7-b4d1-8383451ae5a4\",\"panelIndex\":\"10\",\"type\":\"visualization\",\"version\":\"6.1.0\"}]",
|
||||
"timeRestore": false,
|
||||
"title": "Icingabeat-Status",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1
|
||||
},
|
||||
"id": "a13f1a80-e4cf-11e7-b4d1-8383451ae5a4",
|
||||
"type": "dashboard",
|
||||
"updated_at": "2017-12-27T07:40:38.128Z",
|
||||
"version": 1
|
||||
}
|
||||
],
|
||||
"version": "6.1.0"
|
||||
}
|
File diff suppressed because one or more lines are too long
@ -1,16 +0,0 @@
|
||||
{
|
||||
"sort": [
|
||||
"@timestamp",
|
||||
"desc"
|
||||
],
|
||||
"hits": 0,
|
||||
"description": "",
|
||||
"title": "CheckResults",
|
||||
"version": 1,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"index\":\"icingabeat-*\",\"query\":{\"query_string\":{\"query\":\"type:icingabeat.event.checkresult\",\"analyze_wildcard\":true}},\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647}}"
|
||||
},
|
||||
"columns": [
|
||||
"_source"
|
||||
]
|
||||
}
|
@ -1,16 +0,0 @@
|
||||
{
|
||||
"sort": [
|
||||
"@timestamp",
|
||||
"desc"
|
||||
],
|
||||
"hits": 0,
|
||||
"description": "",
|
||||
"title": "Statuspoller",
|
||||
"version": 1,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"index\":\"icingabeat-*\",\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647},\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"type:icingabeat.status*\"}}}"
|
||||
},
|
||||
"columns": [
|
||||
"_source"
|
||||
]
|
||||
}
|
@ -1,20 +0,0 @@
|
||||
{
|
||||
"sort": [
|
||||
"@timestamp",
|
||||
"desc"
|
||||
],
|
||||
"hits": 0,
|
||||
"description": "",
|
||||
"title": "Notifications",
|
||||
"version": 1,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"index\":\"icingabeat-*\",\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647},\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"type:icingabeat.event.notification\"}}}"
|
||||
},
|
||||
"columns": [
|
||||
"host",
|
||||
"service",
|
||||
"notification_type",
|
||||
"text",
|
||||
"users"
|
||||
]
|
||||
}
|
@ -1,10 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"Endpoints comparisson\",\"type\":\"timelion\",\"params\":{\"expression\":\".es(metric='avg:status.api.num_endpoints').label(\\\"Endpoints\\\"), .es(metric='avg:status.api.num_not_conn_endpoints').label(\\\"Endpoints not connected\\\").title(\\\"Connected Endpoints\\\")\",\"interval\":\"auto\"},\"aggs\":[],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "Endpoints comparisson",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"Nodes\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"status.icingaapplication.app.node_name\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "Nodes",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"savedSearchId": "5570eb90-098a-11e7-9f17-cf3a85e0d1dc",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -1,10 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"States of Hosts\",\"type\":\"timelion\",\"params\":{\"expression\":\".es(metric='avg:status.num_hosts_up').color(#3EC8AC).label(\\\"Up\\\").title(\\\"States of Hosts\\\"),.es(metric='avg:status.num_hosts_down').color(#E94822).label(\\\"Down\\\"),.es(metric='avg:status.num_hosts_unreachable').color(#6E60A0).label(\\\"Unreachable\\\")\",\"interval\":\"1m\"},\"aggs\":[],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "States of Hosts",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"Icinga Version\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"status.icingaapplication.app.version\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"\"}}],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "Icinga Version",
|
||||
"uiStateJSON": "{\"vis\":{\"legendOpen\":true}}",
|
||||
"version": 1,
|
||||
"savedSearchId": "5570eb90-098a-11e7-9f17-cf3a85e0d1dc",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -1,10 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"Hostchecks by time\",\"type\":\"timelion\",\"params\":{\"expression\":\".es(metric='avg:status.active_host_checks_1min').color(#070F4E).label(\\\"1 min\\\").title(\\\"Amount of Hostchecks\\\"),.es(metric='avg:status.active_host_checks_5min').color(#2772DB).label(\\\"5 min\\\"),.es(metric='avg:status.active_host_checks_15min').color(#3AB1C8).label(\\\"15 min\\\")\",\"interval\":\"auto\"},\"aggs\":[],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "Hostchecks by time",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"Notifications by User\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"filters\",\"schema\":\"segment\",\"params\":{\"filters\":[{\"input\":{\"query\":{\"query_string\":{\"query\":\"users:bob\",\"analyze_wildcard\":true}}},\"label\":\"Bob\"},{\"input\":{\"query\":{\"query_string\":{\"query\":\"users:on-call\",\"analyze_wildcard\":true}}},\"label\":\"On-Call\"}]}}],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "Notifications by User",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"savedSearchId": "9b1ca350-0b1a-11e7-ad60-c7e10cd34b29",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -1,10 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"Servicechecks by time\",\"type\":\"timelion\",\"params\":{\"expression\":\".es(metric='avg:status.active_service_checks_1min').color(#070F4E).label(\\\"1 min\\\").title(\\\"Amount of Servicechecks\\\"),.es(metric='avg:status.active_service_checks_5min').color(#2772DB).label(\\\"5 min\\\"),.es(metric='avg:status.active_service_checks_15min').color(#3AB1C8).label(\\\"15 min\\\")\",\"interval\":\"1m\"},\"aggs\":[],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "Servicechecks by time",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"CheckResults by State\",\"type\":\"histogram\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"scale\":\"linear\",\"mode\":\"stacked\",\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"orderBucketsBySum\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"enabled\":true,\"type\":\"filters\",\"schema\":\"group\",\"params\":{\"filters\":[{\"input\":{\"query\":{\"query_string\":{\"query\":\"check_result.state:0\",\"analyze_wildcard\":true}}},\"label\":\"0: OK\"},{\"input\":{\"query\":{\"query_string\":{\"query\":\"check_result.state:1\",\"analyze_wildcard\":true}}},\"label\":\"1: Warning\"},{\"input\":{\"query\":{\"query_string\":{\"query\":\"check_result.state:2\",\"analyze_wildcard\":true}}},\"label\":\"2: Critical\"},{\"input\":{\"query\":{\"query_string\":{\"query\":\"check_result.state:3\",\"analyze_wildcard\":true}}},\"label\":\"3: Unknown\"}]}}],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "CheckResults by State",
|
||||
"uiStateJSON": "{\"vis\":{\"colors\":{\"0\":\"#629E51\",\"1\":\"#E5AC0E\",\"2\":\"#BF1B00\",\"Ok\":\"#508642\",\"Critical\":\"#BF1B00\",\"Warning\":\"#EAB839\",\"Unknown\":\"#962D82\",\"0: OK\":\"#629E51\",\"1: Warning\":\"#E5AC0E\",\"2: Critical\":\"#BF1B00\",\"3: Unknown\":\"#962D82\"}}}",
|
||||
"version": 1,
|
||||
"savedSearchId": "5091de50-0975-11e7-a4dd-e96fa284b426",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -1,10 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"MySQL Queries\",\"type\":\"timelion\",\"params\":{\"expression\":\".es(metric='avg:perfdata.idomysqlconnection_ido-mysql_queries_1min.value').color(#616EEF).label(\\\"1 min\\\").title(\\\"MySQL Queries\\\"), .es(metric='avg:perfdata.idomysqlconnection_ido-mysql_queries_5mins.value').color(#09A8FA).label(\\\"5 min\\\"), .es(metric='avg:perfdata.idomysqlconnection_ido-mysql_queries_15mins.value').color(#41C5D3).label(\\\"15 min\\\")\",\"interval\":\"1m\"},\"aggs\":[],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "MySQL Queries",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -1,10 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"Icinga Logo\",\"type\":\"markdown\",\"params\":{\"markdown\":\"\"},\"aggs\":[],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "Icinga Logo",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"Services Tag Cloud\",\"type\":\"tagcloud\",\"params\":{\"scale\":\"linear\",\"orientation\":\"single\",\"minFontSize\":18,\"maxFontSize\":72},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"service\",\"size\":50,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Services\"}}],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "Services Tag Cloud",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"savedSearchId": "5091de50-0975-11e7-a4dd-e96fa284b426",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"Notification Services\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"service\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "Notification Services",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"savedSearchId": "9b1ca350-0b1a-11e7-ad60-c7e10cd34b29",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"CheckResult Count\",\"type\":\"metric\",\"params\":{\"handleNoResults\":true,\"fontSize\":60},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"CheckResults received\"}}],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "CheckResult Count",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"savedSearchId": "5091de50-0975-11e7-a4dd-e96fa284b426",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"Notification Hosts\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"host\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "Notification Hosts",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"savedSearchId": "9b1ca350-0b1a-11e7-ad60-c7e10cd34b29",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"Hosts Tag Cloud\",\"type\":\"tagcloud\",\"params\":{\"scale\":\"linear\",\"orientation\":\"single\",\"minFontSize\":18,\"maxFontSize\":72},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"host\",\"size\":50,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Hosts\"}}],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "Hosts Tag Cloud",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"savedSearchId": "5091de50-0975-11e7-a4dd-e96fa284b426",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -1,10 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"States of Services\",\"type\":\"timelion\",\"params\":{\"expression\":\".es(metric='avg:status.num_services_ok').color(#3EC8AC).label(\\\"Ok\\\").title(\\\"States of Services\\\"),.es(metric='avg:status.num_services_warning').color(#F2910A).label(\\\"Warning\\\"),.es(metric='avg:status.num_services_critical').color(#E94822).label(\\\"Critical\\\")\",\"interval\":\"1m\"},\"aggs\":[],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "States of Services",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"Notification Types\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"notification_type\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "Notification Types",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"savedSearchId": "9b1ca350-0b1a-11e7-ad60-c7e10cd34b29",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"MySQL Schema Version\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"status.idomysqlconnection.ido-mysql.version\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "MySQL Schema Version",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"savedSearchId": "5570eb90-098a-11e7-9f17-cf3a85e0d1dc",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"visState": "{\"title\":\"Notification Types\",\"type\":\"histogram\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"scale\":\"linear\",\"mode\":\"stacked\",\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"notification_type\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}",
|
||||
"description": "",
|
||||
"title": "Notification Types",
|
||||
"uiStateJSON": "{}",
|
||||
"version": 1,
|
||||
"savedSearchId": "9b1ca350-0b1a-11e7-ad60-c7e10cd34b29",
|
||||
"kibanaSavedObjectMeta": {
|
||||
"searchSourceJSON": "{\"filter\":[]}"
|
||||
}
|
||||
}
|
@ -12,9 +12,9 @@ import (
|
||||
|
||||
"github.com/icinga/icingabeat/config"
|
||||
|
||||
"github.com/elastic/beats/libbeat/beat"
|
||||
"github.com/elastic/beats/libbeat/common"
|
||||
"github.com/elastic/beats/libbeat/logp"
|
||||
"github.com/elastic/beats/v7/libbeat/beat"
|
||||
"github.com/elastic/beats/v7/libbeat/common"
|
||||
"github.com/elastic/beats/v7/libbeat/logp"
|
||||
)
|
||||
|
||||
// Eventstream type
|
||||
@ -51,39 +51,39 @@ func BuildEventstreamEvent(e []byte) beat.Event {
|
||||
event.Fields = common.MapStr{}
|
||||
|
||||
for key, value := range icingaEvent {
|
||||
event.Fields.Put(key, value)
|
||||
event.Fields.Put(target_key+key, value)
|
||||
}
|
||||
|
||||
logp.Debug("icingabeat", "Type: %v", icingaEvent["type"])
|
||||
switch icingaEvent["type"] {
|
||||
case "CheckResult", "StateChange", "Notification":
|
||||
checkResult := icingaEvent["check_result"].(map[string]interface{})
|
||||
event.Fields.Put("check_result.execution_start", FloatToTimestamp(checkResult["execution_start"].(float64)))
|
||||
event.Fields.Put("check_result.execution_end", FloatToTimestamp(checkResult["execution_end"].(float64)))
|
||||
event.Fields.Put("check_result.schedule_start", FloatToTimestamp(checkResult["schedule_start"].(float64)))
|
||||
event.Fields.Put("check_result.schedule_end", FloatToTimestamp(checkResult["schedule_end"].(float64)))
|
||||
event.Fields.Delete("check_result.performance_data")
|
||||
event.Fields.Put(target_key+"check_result.execution_start", FloatToTimestamp(checkResult["execution_start"].(float64)))
|
||||
event.Fields.Put(target_key+"check_result.execution_end", FloatToTimestamp(checkResult["execution_end"].(float64)))
|
||||
event.Fields.Put(target_key+"check_result.schedule_start", FloatToTimestamp(checkResult["schedule_start"].(float64)))
|
||||
event.Fields.Put(target_key+"check_result.schedule_end", FloatToTimestamp(checkResult["schedule_end"].(float64)))
|
||||
event.Fields.Delete(target_key + "check_result.performance_data")
|
||||
|
||||
case "AcknowledgementSet":
|
||||
event.Delete("comment")
|
||||
event.Fields.Put("comment.text", icingaEvent["comment"])
|
||||
event.Fields.Put("expiry", FloatToTimestamp(icingaEvent["expiry"].(float64)))
|
||||
event.Fields.Put(target_key+"comment.text", icingaEvent["comment"])
|
||||
event.Fields.Put(target_key+"expiry", FloatToTimestamp(icingaEvent["expiry"].(float64)))
|
||||
|
||||
case "CommentAdded", "CommentRemoved":
|
||||
comment := icingaEvent["comment"].(map[string]interface{})
|
||||
event.Fields.Put("comment.entry_time", FloatToTimestamp(comment["entry_time"].(float64)))
|
||||
event.Fields.Put("comment.expire_time", FloatToTimestamp(comment["expire_time"].(float64)))
|
||||
event.Fields.Put(target_key+"comment.entry_time", FloatToTimestamp(comment["entry_time"].(float64)))
|
||||
event.Fields.Put(target_key+"comment.expire_time", FloatToTimestamp(comment["expire_time"].(float64)))
|
||||
|
||||
case "DowntimeAdded", "DowntimeRemoved", "DowntimeStarted", "DowntimeTriggered":
|
||||
downtime := icingaEvent["downtime"].(map[string]interface{})
|
||||
event.Fields.Put("downtime.end_time", FloatToTimestamp(downtime["end_time"].(float64)))
|
||||
event.Fields.Put("downtime.entry_time", FloatToTimestamp(downtime["entry_time"].(float64)))
|
||||
event.Fields.Put("downtime.start_time", FloatToTimestamp(downtime["start_time"].(float64)))
|
||||
event.Fields.Put("downtime.trigger_time", FloatToTimestamp(downtime["trigger_time"].(float64)))
|
||||
event.Fields.Put(target_key+"downtime.end_time", FloatToTimestamp(downtime["end_time"].(float64)))
|
||||
event.Fields.Put(target_key+"downtime.entry_time", FloatToTimestamp(downtime["entry_time"].(float64)))
|
||||
event.Fields.Put(target_key+"downtime.start_time", FloatToTimestamp(downtime["start_time"].(float64)))
|
||||
event.Fields.Put(target_key+"downtime.trigger_time", FloatToTimestamp(downtime["trigger_time"].(float64)))
|
||||
}
|
||||
|
||||
event.Fields.Put("type", "icingabeat.event."+strings.ToLower(icingaEvent["type"].(string)))
|
||||
event.Fields.Put("timestamp", FloatToTimestamp(icingaEvent["timestamp"].(float64)))
|
||||
event.Fields.Put(target_key+"timestamp", FloatToTimestamp(icingaEvent["timestamp"].(float64)))
|
||||
|
||||
return event
|
||||
}
|
||||
|
@ -4,13 +4,12 @@ import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/elastic/beats/libbeat/logp"
|
||||
"github.com/elastic/beats/v7/libbeat/logp"
|
||||
)
|
||||
|
||||
func requestURL(bt *Icingabeat, method string, URL *url.URL) (*http.Response, error) {
|
||||
@ -32,7 +31,6 @@ func requestURL(bt *Icingabeat, method string, URL *url.URL) (*http.Response, er
|
||||
skipSslVerify = true
|
||||
}
|
||||
|
||||
fmt.Print(bt.config.SSL.CertificateAuthorities)
|
||||
tlsConfig := &tls.Config{
|
||||
InsecureSkipVerify: skipSslVerify,
|
||||
RootCAs: certPool,
|
||||
@ -68,6 +66,9 @@ func requestURL(bt *Icingabeat, method string, URL *url.URL) (*http.Response, er
|
||||
case 401:
|
||||
err = errors.New("Authentication failed for user " + bt.config.User)
|
||||
defer response.Body.Close()
|
||||
case 404:
|
||||
err = errors.New("404 Not Found. Missing permissions may be a reason for this.")
|
||||
defer response.Body.Close()
|
||||
}
|
||||
|
||||
return response, err
|
||||
|
@ -3,9 +3,9 @@ package beater
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/elastic/beats/libbeat/beat"
|
||||
"github.com/elastic/beats/libbeat/common"
|
||||
"github.com/elastic/beats/libbeat/logp"
|
||||
"github.com/elastic/beats/v7/libbeat/beat"
|
||||
"github.com/elastic/beats/v7/libbeat/common"
|
||||
"github.com/elastic/beats/v7/libbeat/logp"
|
||||
|
||||
"github.com/icinga/icingabeat/config"
|
||||
)
|
||||
@ -17,6 +17,8 @@ type Icingabeat struct {
|
||||
client beat.Client
|
||||
}
|
||||
|
||||
var target_key = "icinga."
|
||||
|
||||
// New beater
|
||||
func New(b *beat.Beat, cfg *common.Config) (beat.Beater, error) {
|
||||
config := config.DefaultConfig
|
||||
@ -47,7 +49,6 @@ func (bt *Icingabeat) Run(b *beat.Beat) error {
|
||||
go eventstream.Run()
|
||||
}
|
||||
|
||||
fmt.Print(bt.config.Statuspoller.Interval)
|
||||
if bt.config.Statuspoller.Interval > 0 {
|
||||
var statuspoller *Statuspoller
|
||||
statuspoller = NewStatuspoller(bt, bt.config)
|
||||
|
@ -10,9 +10,9 @@ import (
|
||||
|
||||
"github.com/icinga/icingabeat/config"
|
||||
|
||||
"github.com/elastic/beats/libbeat/beat"
|
||||
"github.com/elastic/beats/libbeat/common"
|
||||
"github.com/elastic/beats/libbeat/logp"
|
||||
"github.com/elastic/beats/v7/libbeat/beat"
|
||||
"github.com/elastic/beats/v7/libbeat/common"
|
||||
"github.com/elastic/beats/v7/libbeat/logp"
|
||||
)
|
||||
|
||||
// Statuspoller type
|
||||
@ -56,11 +56,20 @@ func BuildStatusEvents(body []byte) []beat.Event {
|
||||
switch statusvalue.(type) {
|
||||
case map[string]interface{}:
|
||||
if len(statusvalue.(map[string]interface{})) > 0 {
|
||||
event.Fields.Put(key, value)
|
||||
for key, value := range value.(map[string]interface{}) {
|
||||
if key == "api" {
|
||||
// "zones" can include a massive amount of data, depending
|
||||
// on the number of connected agents and satellites
|
||||
// since enough data is included in other keys, we're
|
||||
// removing "zones" explicitly
|
||||
delete(value.(map[string]interface{}), "zones")
|
||||
}
|
||||
}
|
||||
event.Fields.Put(target_key+key, value)
|
||||
}
|
||||
|
||||
default:
|
||||
event.Fields.Put(key, value)
|
||||
event.Fields.Put(target_key+key, value)
|
||||
}
|
||||
|
||||
}
|
||||
@ -74,22 +83,21 @@ func BuildStatusEvents(body []byte) []beat.Event {
|
||||
case interface{}:
|
||||
key = "perfdata." + perfdata.(map[string]interface{})["label"].(string)
|
||||
value = perfdata
|
||||
event.Fields.Put(key, value)
|
||||
event.Fields.Put(target_key+key, value)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
case "name":
|
||||
key = "type"
|
||||
value = "icingabeat.status." + strings.ToLower(value.(string))
|
||||
event.Fields.Put(key, value)
|
||||
event.Fields.Put("type", value)
|
||||
|
||||
default:
|
||||
event.Fields.Put(key, value)
|
||||
event.Fields.Put(target_key+key, value)
|
||||
}
|
||||
}
|
||||
|
||||
if statusAvailable, _ := event.Fields.HasKey("status"); statusAvailable == true {
|
||||
if statusAvailable, _ := event.Fields.HasKey(target_key + "status"); statusAvailable == true {
|
||||
statusEvents = append(statusEvents, event)
|
||||
}
|
||||
}
|
||||
|
@ -3,11 +3,12 @@ package cmd
|
||||
import (
|
||||
"github.com/icinga/icingabeat/beater"
|
||||
|
||||
cmd "github.com/elastic/beats/libbeat/cmd"
|
||||
cmd "github.com/elastic/beats/v7/libbeat/cmd"
|
||||
"github.com/elastic/beats/v7/libbeat/cmd/instance"
|
||||
)
|
||||
|
||||
// Name of this beat
|
||||
var Name = "icingabeat"
|
||||
|
||||
// RootCmd to handle beats cli
|
||||
var RootCmd = cmd.GenRootCmd(Name, "", beater.New)
|
||||
var RootCmd = cmd.GenRootCmdWithSettings(beater.New, instance.Settings{Name: Name})
|
||||
|
@ -1,6 +1,6 @@
|
||||
dashboards:
|
||||
- id: 34e97340-e4ce-11e7-b4d1-8383451ae5a4
|
||||
file: Icingabeat-CheckRestuls.json
|
||||
file: Icingabeat-CheckResults.json
|
||||
|
||||
- id: ed031e90-e4ce-11e7-b4d1-8383451ae5a4
|
||||
file: Icingabeat-Notifications.json
|
||||
|
@ -1 +1 @@
|
||||
{"uuid":"0409fabd-956a-4000-9090-22c9c0b438af"}
|
||||
{"uuid":"0409fabd-956a-4000-9090-22c9c0b438af","first_start":"2022-05-31T13:14:26.86643+02:00"}
|
||||
|
@ -10,7 +10,7 @@ or Logstash.
|
||||
> Elasticsearch, either directly or via Logstash, so it can be visualized with
|
||||
> Kibana.
|
||||
|
||||
 | 
|
||||
 | 
|
||||
-------------------------------------------------|-------------------------------------
|
||||
|
||||
## Eventstream
|
||||
|
@ -71,7 +71,7 @@ Make sure you have configured Icingabeat properly before starting it. Use one
|
||||
of the following commands to start Icingabeat:
|
||||
|
||||
* `service icingabeat start` or
|
||||
* `systemctl icingabeat start` or
|
||||
* `systemctl start icingabeat` or
|
||||
* `/etc/init.d/icingabeat start`
|
||||
|
||||
## Dashboards
|
||||
@ -79,17 +79,12 @@ We have dashboards prepared that you can use when getting started with
|
||||
Icingabeat. They are meant to give you some inspiration before you start
|
||||
exploring the data by yourself.
|
||||
|
||||
**Note:** The dashboards require Kibana >= 6.0
|
||||
Starting with icingabeat v7.17.4 you have to download and import the dashboards manually.
|
||||
|
||||
Import dashboards and index pattern:
|
||||
``` shell
|
||||
icingabeat setup
|
||||
```
|
||||
Download and upack `dashboards.zip` from the [latest release](https://github.com/Icinga/icingabeat/releases/latest) page.
|
||||
|
||||
Set Kibana host, user and password if necessary:
|
||||
``` shell
|
||||
icingabeat setup -E setup.kibana.host=127.0.0.1:5601 -E setup.kibana.username=elastic -E setup.kibana.password=secret
|
||||
```
|
||||
Use Kibana's Import functionality to upload the `*.ndjson` files which will
|
||||
import a bunch of saved objects, including dashboards and single visualizations.
|
||||
|
||||
## Manual Installation
|
||||
|
||||
|
15821
docs/fields.asciidoc
15821
docs/fields.asciidoc
File diff suppressed because it is too large
Load Diff
11602
fields.yml
11602
fields.yml
File diff suppressed because it is too large
Load Diff
155
go.mod
Normal file
155
go.mod
Normal file
@ -0,0 +1,155 @@
|
||||
module github.com/icinga/icingabeat
|
||||
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
github.com/tsg/go-daemon v0.0.0-20200207173439-e704b93fd89b
|
||||
github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2
|
||||
github.com/Microsoft/go-winio v0.5.1 // indirect
|
||||
github.com/Shopify/sarama v0.0.0-00010101000000-000000000000 // indirect
|
||||
github.com/StackExchange/wmi v0.0.0-20170221213301-9f32b5905fd6 // indirect
|
||||
github.com/akavel/rsrc v0.8.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.1 // indirect
|
||||
github.com/dlclark/regexp2 v1.1.7-0.20171009020623-7632a260cbaf // indirect
|
||||
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7 // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/dop251/goja v0.0.0-20200831102558-9af81ddcf0e1 // indirect
|
||||
github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6 // indirect
|
||||
github.com/dustin/go-humanize v1.0.0 // indirect
|
||||
github.com/eapache/go-resiliency v1.2.0 // indirect
|
||||
github.com/elastic/ecs v1.12.0 // indirect
|
||||
github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6 // indirect
|
||||
github.com/elastic/go-concert v0.2.0 // indirect
|
||||
github.com/elastic/go-lumber v0.1.0 // indirect
|
||||
github.com/elastic/go-seccomp-bpf v1.2.0 // indirect
|
||||
github.com/elastic/go-structform v0.0.9 // indirect
|
||||
github.com/elastic/go-sysinfo v1.7.1 // indirect
|
||||
github.com/elastic/go-txfile v0.0.7 // indirect
|
||||
github.com/elastic/go-ucfg v0.8.3 // indirect
|
||||
github.com/elastic/go-windows v1.0.1 // indirect
|
||||
github.com/elastic/gosigar v0.14.2 // indirect
|
||||
github.com/fatih/color v1.9.0 // indirect
|
||||
github.com/go-ole/go-ole v1.2.5-0.20190920104607-14974a1cf647 // indirect
|
||||
github.com/go-sourcemap/sourcemap v2.1.2+incompatible // indirect
|
||||
github.com/gofrs/flock v0.7.2-0.20190320160742-5135e617513b // indirect
|
||||
github.com/gofrs/uuid v3.3.0+incompatible // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/gomodule/redigo v1.8.3 // indirect
|
||||
github.com/google/go-cmp v0.5.6 // indirect
|
||||
github.com/h2non/filetype v1.1.1 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.0 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.4 // indirect
|
||||
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect
|
||||
github.com/jonboulle/clockwork v0.2.2 // indirect
|
||||
github.com/josephspurrier/goversioninfo v0.0.0-20190209210621-63e6d1acd3dd // indirect
|
||||
github.com/magefile/mage v1.11.0
|
||||
github.com/mattn/go-colorable v0.1.6 // indirect
|
||||
github.com/miekg/dns v1.1.25 // indirect
|
||||
github.com/mitchellh/hashstructure v0.0.0-20170116052023-ab25296c0f51 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/procfs v0.6.0 // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
|
||||
github.com/shirou/gopsutil v3.20.12+incompatible // indirect
|
||||
github.com/spf13/cobra v1.0.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/urso/sderr v0.0.0-20210525210834-52b04e8f5c71 // indirect
|
||||
github.com/xdg/scram v1.0.3 // indirect
|
||||
go.elastic.co/apm v1.11.0 // indirect
|
||||
go.elastic.co/apm/module/apmelasticsearch v1.7.2 // indirect
|
||||
go.elastic.co/apm/module/apmhttp v1.7.2 // indirect
|
||||
go.elastic.co/ecszap v0.3.0 // indirect
|
||||
go.uber.org/atomic v1.5.0 // indirect
|
||||
go.uber.org/multierr v1.3.0 // indirect
|
||||
go.uber.org/zap v1.14.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e // indirect
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
|
||||
golang.org/x/net v0.0.0-20211020060615-d418f374d309 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 // indirect
|
||||
golang.org/x/sys v0.0.0-20211102192858-4dd72447c267 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
|
||||
golang.org/x/tools v0.1.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20211021150943-2b146023228c // indirect
|
||||
google.golang.org/grpc v1.41.0 // indirect
|
||||
google.golang.org/protobuf v1.27.1 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect
|
||||
gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect
|
||||
gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
|
||||
gopkg.in/jcmturner/gokrb5.v7 v7.5.0 // indirect
|
||||
gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
|
||||
k8s.io/api v0.21.1 // indirect
|
||||
k8s.io/apimachinery v0.21.1 // indirect
|
||||
k8s.io/client-go v0.21.1 // indirect
|
||||
)
|
||||
|
||||
require github.com/elastic/beats/v7 v7.17.4
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v0.3.1 // indirect
|
||||
github.com/armon/go-radix v1.0.0 // indirect
|
||||
github.com/containerd/containerd v1.5.7 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/distribution v2.8.0+incompatible // indirect
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect
|
||||
github.com/eapache/queue v1.1.0 // indirect
|
||||
github.com/go-logr/logr v0.4.0 // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/googleapis/gnostic v0.4.1 // indirect
|
||||
github.com/hashicorp/errwrap v1.0.0 // indirect
|
||||
github.com/hashicorp/go-uuid v1.0.2 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
|
||||
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
|
||||
github.com/jcmturner/gofork v1.0.0 // indirect
|
||||
github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect
|
||||
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
|
||||
github.com/json-iterator/go v1.1.11 // indirect
|
||||
github.com/klauspost/compress v1.13.6 // indirect
|
||||
github.com/mattn/go-isatty v0.0.12 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.1 // indirect
|
||||
github.com/pierrec/lz4 v2.6.0+incompatible // indirect
|
||||
github.com/santhosh-tekuri/jsonschema v1.2.4 // indirect
|
||||
github.com/sirupsen/logrus v1.8.1 // indirect
|
||||
github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797 // indirect
|
||||
github.com/urso/go-bin v0.0.0-20180220135811-781c575c9f0e // indirect
|
||||
github.com/urso/magetools v0.0.0-20190919040553-290c89e0c230 // indirect
|
||||
github.com/xdg/stringprep v1.0.3 // indirect
|
||||
go.elastic.co/fastjson v1.1.0 // indirect
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee // indirect
|
||||
golang.org/x/mod v0.4.2 // indirect
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
honnef.co/go/tools v0.0.1-2020.1.4 // indirect
|
||||
k8s.io/klog/v2 v2.8.0 // indirect
|
||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.1.0 // indirect
|
||||
sigs.k8s.io/yaml v1.2.0 // indirect
|
||||
)
|
||||
|
||||
replace (
|
||||
github.com/Microsoft/go-winio => github.com/bi-zone/go-winio v0.4.15
|
||||
github.com/Shopify/sarama => github.com/elastic/sarama v1.19.1-0.20210823122811-11c3ef800752
|
||||
github.com/apoydence/eachers => github.com/poy/eachers v0.0.0-20181020210610-23942921fe77 //indirect, see https://github.com/elastic/beats/pull/29780 for details.
|
||||
github.com/cucumber/godog => github.com/cucumber/godog v0.8.1
|
||||
github.com/docker/docker => github.com/docker/engine v0.0.0-20191113042239-ea84732a7725
|
||||
github.com/docker/go-plugins-helpers => github.com/elastic/go-plugins-helpers v0.0.0-20200207104224-bdf17607b79f
|
||||
github.com/dop251/goja => github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20
|
||||
github.com/dop251/goja_nodejs => github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6
|
||||
github.com/fsnotify/fsevents => github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270
|
||||
github.com/fsnotify/fsnotify => github.com/adriansr/fsnotify v1.4.8-0.20211018144411-a81f2b630e7c
|
||||
github.com/golang/glog => github.com/elastic/glog v1.0.1-0.20210831205241-7d8b5c89dfc4
|
||||
github.com/google/gopacket => github.com/adriansr/gopacket v1.1.18-0.20200327165309-dd62abfa8a41
|
||||
github.com/insomniacslk/dhcp => github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3 // indirect
|
||||
github.com/tonistiigi/fifo => github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c
|
||||
)
|
9
icingabeat.docker.yml
Normal file
9
icingabeat.docker.yml
Normal file
@ -0,0 +1,9 @@
|
||||
|
||||
processors:
|
||||
- add_cloud_metadata: ~
|
||||
- add_docker_metadata: ~
|
||||
|
||||
output.elasticsearch:
|
||||
hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}'
|
||||
username: '${ELASTICSEARCH_USERNAME:}'
|
||||
password: '${ELASTICSEARCH_PASSWORD:}'
|
@ -1,727 +0,0 @@
|
||||
################### Icingabeat Configuration Example #########################
|
||||
|
||||
############################# Icingabeat ######################################
|
||||
|
||||
icingabeat:
|
||||
|
||||
# Defines the Icinga API endpoint
|
||||
host: "localhost"
|
||||
|
||||
# Defines the port of the API endpoint
|
||||
port: 5665
|
||||
|
||||
# A user with sufficient permissions
|
||||
user: "icinga"
|
||||
|
||||
# Password of the user
|
||||
password: "icinga"
|
||||
|
||||
# Skip SSL verification
|
||||
skip_ssl_verify: false
|
||||
|
||||
# Icingabeat supports capturing of an evenstream and periodical polling of the
|
||||
# Icinga status data.
|
||||
eventstream:
|
||||
#
|
||||
# Decide which events to receive from the event stream.
|
||||
# The following event stream types are available:
|
||||
#
|
||||
# * CheckResult
|
||||
# * StateChange
|
||||
# * Notification
|
||||
# * AcknowledgementSet
|
||||
# * AcknowledgementCleared
|
||||
# * CommentAdded
|
||||
# * CommentRemoved
|
||||
# * DowntimeAdded
|
||||
# * DowntimeRemoved
|
||||
# * DowntimeStarted
|
||||
# * DowntimeTriggered
|
||||
#
|
||||
# To disable eventstream, leave the types empty or comment out the option
|
||||
types:
|
||||
- CheckResult
|
||||
- StateChange
|
||||
|
||||
# Event streams can be filtered by attributes using the prefix 'event.'
|
||||
#
|
||||
# Example for the CheckResult type with the exit_code set to 2:
|
||||
# filter: "event.check_result.exit_status==2"
|
||||
#
|
||||
# Example for the CheckResult type with the service matching the string
|
||||
# pattern "mysql*":
|
||||
# filter: 'match("mysql*", event.service)'
|
||||
#
|
||||
# To disable filtering set an empty string or comment out the filter option
|
||||
filter: ""
|
||||
|
||||
# Defines how fast to reconnect to the API on connection loss
|
||||
retry_interval: 10s
|
||||
|
||||
statuspoller:
|
||||
# Interval at which the status API is called. Set to 0 to disable polling.
|
||||
interval: 60s
|
||||
|
||||
#================================ General ======================================
|
||||
|
||||
# The name of the shipper that publishes the network data. It can be used to group
|
||||
# all the transactions sent by a single shipper in the web interface.
|
||||
# If this options is not defined, the hostname is used.
|
||||
#name:
|
||||
|
||||
# The tags of the shipper are included in their own field with each
|
||||
# transaction published. Tags make it easy to group servers by different
|
||||
# logical properties.
|
||||
#tags: ["service-X", "web-tier"]
|
||||
|
||||
# Optional fields that you can specify to add additional information to the
|
||||
# output. Fields can be scalar values, arrays, dictionaries, or any nested
|
||||
# combination of these.
|
||||
#fields:
|
||||
# env: staging
|
||||
|
||||
# If this option is set to true, the custom fields are stored as top-level
|
||||
# fields in the output document instead of being grouped under a fields
|
||||
# sub-dictionary. Default is false.
|
||||
#fields_under_root: false
|
||||
|
||||
# Internal queue size for single events in processing pipeline
|
||||
#queue_size: 1000
|
||||
|
||||
# The internal queue size for bulk events in the processing pipeline.
|
||||
# Do not modify this value.
|
||||
#bulk_queue_size: 0
|
||||
|
||||
# Sets the maximum number of CPUs that can be executing simultaneously. The
|
||||
# default is the number of logical CPUs available in the system.
|
||||
#max_procs:
|
||||
|
||||
#================================ Processors ===================================
|
||||
|
||||
# Processors are used to reduce the number of fields in the exported event or to
|
||||
# enhance the event with external metadata. This section defines a list of
|
||||
# processors that are applied one by one and the first one receives the initial
|
||||
# event:
|
||||
#
|
||||
# event -> filter1 -> event1 -> filter2 ->event2 ...
|
||||
#
|
||||
# The supported processors are drop_fields, drop_event, include_fields, and
|
||||
# add_cloud_metadata.
|
||||
#
|
||||
# For example, you can use the following processors to keep the fields that
|
||||
# contain CPU load percentages, but remove the fields that contain CPU ticks
|
||||
# values:
|
||||
#
|
||||
#processors:
|
||||
#- include_fields:
|
||||
# fields: ["cpu"]
|
||||
#- drop_fields:
|
||||
# fields: ["cpu.user", "cpu.system"]
|
||||
#
|
||||
# The following example drops the events that have the HTTP response code 200:
|
||||
#
|
||||
#processors:
|
||||
#- drop_event:
|
||||
# when:
|
||||
# equals:
|
||||
# http.code: 200
|
||||
#
|
||||
# The following example enriches each event with metadata from the cloud
|
||||
# provider about the host machine. It works on EC2, GCE, and DigitalOcean.
|
||||
#
|
||||
#processors:
|
||||
#- add_cloud_metadata:
|
||||
#
|
||||
|
||||
#================================ Outputs ======================================
|
||||
|
||||
# Configure what outputs to use when sending the data collected by the beat.
|
||||
# Multiple outputs may be used.
|
||||
|
||||
#-------------------------- Elasticsearch output -------------------------------
|
||||
output.elasticsearch:
|
||||
# Boolean flag to enable or disable the output module.
|
||||
#enabled: true
|
||||
|
||||
# Array of hosts to connect to.
|
||||
# Scheme and port can be left out and will be set to the default (http and 9200)
|
||||
# In case you specify and additional path, the scheme is required: http://localhost:9200/path
|
||||
# IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
|
||||
hosts: ["localhost:9200"]
|
||||
|
||||
# Set gzip compression level.
|
||||
#compression_level: 0
|
||||
|
||||
# Optional protocol and basic auth credentials.
|
||||
#protocol: "https"
|
||||
#username: "elastic"
|
||||
#password: "changeme"
|
||||
|
||||
# Dictionary of HTTP parameters to pass within the url with index operations.
|
||||
#parameters:
|
||||
#param1: value1
|
||||
#param2: value2
|
||||
|
||||
# Number of workers per Elasticsearch host.
|
||||
#worker: 1
|
||||
|
||||
# Optional index name. The default is "icingabeat" plus date
|
||||
# and generates [icingabeat-]YYYY.MM.DD keys.
|
||||
#index: "icingabeat-%{+yyyy.MM.dd}"
|
||||
|
||||
# Optional ingest node pipeline. By default no pipeline will be used.
|
||||
#pipeline: ""
|
||||
|
||||
# Optional HTTP Path
|
||||
#path: "/elasticsearch"
|
||||
|
||||
# Custom HTTP headers to add to each request
|
||||
#headers:
|
||||
# X-My-Header: Contents of the header
|
||||
|
||||
# Proxy server url
|
||||
#proxy_url: http://proxy:3128
|
||||
|
||||
# The number of times a particular Elasticsearch index operation is attempted. If
|
||||
# the indexing operation doesn't succeed after this many retries, the events are
|
||||
# dropped. The default is 3.
|
||||
#max_retries: 3
|
||||
|
||||
# The maximum number of events to bulk in a single Elasticsearch bulk API index request.
|
||||
# The default is 50.
|
||||
#bulk_max_size: 50
|
||||
|
||||
# Configure http request timeout before failing an request to Elasticsearch.
|
||||
#timeout: 90
|
||||
|
||||
# The number of seconds to wait for new events between two bulk API index requests.
|
||||
# If `bulk_max_size` is reached before this interval expires, addition bulk index
|
||||
# requests are made.
|
||||
#flush_interval: 1s
|
||||
|
||||
# A template is used to set the mapping in Elasticsearch
|
||||
# By default template loading is enabled and the template is loaded.
|
||||
# These settings can be adjusted to load your own template or overwrite existing ones.
|
||||
|
||||
# Set to false to disable template loading.
|
||||
#template.enabled: true
|
||||
|
||||
# Template name. By default the template name is icingabeat.
|
||||
#template.name: "icingabeat"
|
||||
|
||||
# Path to template file
|
||||
#template.path: "${path.config}/icingabeat.template.json"
|
||||
|
||||
# Overwrite existing template
|
||||
#template.overwrite: false
|
||||
|
||||
# If set to true, icingabeat checks the Elasticsearch version at connect time, and if it
|
||||
# is 2.x, it loads the file specified by the template.versions.2x.path setting. The
|
||||
# default is true.
|
||||
#template.versions.2x.enabled: true
|
||||
|
||||
# Path to the Elasticsearch 2.x version of the template file.
|
||||
#template.versions.2x.path: "${path.config}/icingabeat.template-es2x.json"
|
||||
|
||||
# If set to true, icingabeat checks the Elasticsearch version at connect time, and if it
|
||||
# is 6.x, it loads the file specified by the template.versions.6x.path setting. The
|
||||
# default is true.
|
||||
#template.versions.6x.enabled: true
|
||||
|
||||
# Path to the Elasticsearch 6.x version of the template file.
|
||||
#template.versions.6x.path: "${path.config}/icingabeat.template-es6x.json"
|
||||
|
||||
# Use SSL settings for HTTPS. Default is true.
|
||||
#ssl.enabled: true
|
||||
|
||||
# Configure SSL verification mode. If `none` is configured, all server hosts
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `full`.
|
||||
#ssl.verification_mode: full
|
||||
|
||||
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
|
||||
# 1.2 are enabled.
|
||||
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
|
||||
# SSL configuration. By default is off.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/etc/pki/client/cert.pem"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
# Optional passphrase for decrypting the Certificate Key.
|
||||
#ssl.key_passphrase: ''
|
||||
|
||||
# Configure cipher suites to be used for SSL connections
|
||||
#ssl.cipher_suites: []
|
||||
|
||||
# Configure curve types for ECDHE based cipher suites
|
||||
#ssl.curve_types: []
|
||||
|
||||
# Configure what types of renegotiation are supported. Valid options are
|
||||
# never, once, and freely. Default is never.
|
||||
#ssl.renegotiation: never
|
||||
|
||||
|
||||
#----------------------------- Logstash output ---------------------------------
|
||||
#output.logstash:
|
||||
# Boolean flag to enable or disable the output module.
|
||||
#enabled: true
|
||||
|
||||
# The Logstash hosts
|
||||
#hosts: ["localhost:5044"]
|
||||
|
||||
# Number of workers per Logstash host.
|
||||
#worker: 1
|
||||
|
||||
# Set gzip compression level.
|
||||
#compression_level: 3
|
||||
|
||||
# Optional load balance the events between the Logstash hosts
|
||||
#loadbalance: true
|
||||
|
||||
# Number of batches to be send asynchronously to logstash while processing
|
||||
# new batches.
|
||||
#pipelining: 0
|
||||
|
||||
# If enabled only a subset of events in a batch of events is transferred per
|
||||
# transaction. The number of events to be sent increases up to `bulk_max_size`
|
||||
# if no error is encountered.
|
||||
#slow_start: false
|
||||
|
||||
# Optional index name. The default index name is set to name of the beat
|
||||
# in all lowercase.
|
||||
#index: 'icingabeat'
|
||||
|
||||
# SOCKS5 proxy server URL
|
||||
#proxy_url: socks5://user:password@socks5-server:2233
|
||||
|
||||
# Resolve names locally when using a proxy server. Defaults to false.
|
||||
#proxy_use_local_resolver: false
|
||||
|
||||
# Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
|
||||
#ssl.enabled: true
|
||||
|
||||
# Configure SSL verification mode. If `none` is configured, all server hosts
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `full`.
|
||||
#ssl.verification_mode: full
|
||||
|
||||
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
|
||||
# 1.2 are enabled.
|
||||
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
|
||||
# Optional SSL configuration options. SSL is off by default.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/etc/pki/client/cert.pem"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
# Optional passphrase for decrypting the Certificate Key.
|
||||
#ssl.key_passphrase: ''
|
||||
|
||||
# Configure cipher suites to be used for SSL connections
|
||||
#ssl.cipher_suites: []
|
||||
|
||||
# Configure curve types for ECDHE based cipher suites
|
||||
#ssl.curve_types: []
|
||||
|
||||
# Configure what types of renegotiation are supported. Valid options are
|
||||
# never, once, and freely. Default is never.
|
||||
#ssl.renegotiation: never
|
||||
|
||||
#------------------------------- Kafka output ----------------------------------
|
||||
#output.kafka:
|
||||
# Boolean flag to enable or disable the output module.
|
||||
#enabled: true
|
||||
|
||||
# The list of Kafka broker addresses from where to fetch the cluster metadata.
|
||||
# The cluster metadata contain the actual Kafka brokers events are published
|
||||
# to.
|
||||
#hosts: ["localhost:9092"]
|
||||
|
||||
# The Kafka topic used for produced events. The setting can be a format string
|
||||
# using any event field. To set the topic from document type use `%{[type]}`.
|
||||
#topic: beats
|
||||
|
||||
# The Kafka event key setting. Use format string to create unique event key.
|
||||
# By default no event key will be generated.
|
||||
#key: ''
|
||||
|
||||
# The Kafka event partitioning strategy. Default hashing strategy is `hash`
|
||||
# using the `output.kafka.key` setting or randomly distributes events if
|
||||
# `output.kafka.key` is not configured.
|
||||
#partition.hash:
|
||||
# If enabled, events will only be published to partitions with reachable
|
||||
# leaders. Default is false.
|
||||
#reachable_only: false
|
||||
|
||||
# Configure alternative event field names used to compute the hash value.
|
||||
# If empty `output.kafka.key` setting will be used.
|
||||
# Default value is empty list.
|
||||
#hash: []
|
||||
|
||||
# Authentication details. Password is required if username is set.
|
||||
#username: ''
|
||||
#password: ''
|
||||
|
||||
# Kafka version icingabeat is assumed to run against. Defaults to the oldest
|
||||
# supported stable version (currently version 0.8.2.0)
|
||||
#version: 0.8.2
|
||||
|
||||
# Metadata update configuration. Metadata do contain leader information
|
||||
# deciding which broker to use when publishing.
|
||||
#metadata:
|
||||
# Max metadata request retry attempts when cluster is in middle of leader
|
||||
# election. Defaults to 3 retries.
|
||||
#retry.max: 3
|
||||
|
||||
# Waiting time between retries during leader elections. Default is 250ms.
|
||||
#retry.backoff: 250ms
|
||||
|
||||
# Refresh metadata interval. Defaults to every 10 minutes.
|
||||
#refresh_frequency: 10m
|
||||
|
||||
# The number of concurrent load-balanced Kafka output workers.
|
||||
#worker: 1
|
||||
|
||||
# The number of times to retry publishing an event after a publishing failure.
|
||||
# After the specified number of retries, the events are typically dropped.
|
||||
# Some Beats, such as Filebeat, ignore the max_retries setting and retry until
|
||||
# all events are published. Set max_retries to a value less than 0 to retry
|
||||
# until all events are published. The default is 3.
|
||||
#max_retries: 3
|
||||
|
||||
# The maximum number of events to bulk in a single Kafka request. The default
|
||||
# is 2048.
|
||||
#bulk_max_size: 2048
|
||||
|
||||
# The number of seconds to wait for responses from the Kafka brokers before
|
||||
# timing out. The default is 30s.
|
||||
#timeout: 30s
|
||||
|
||||
# The maximum duration a broker will wait for number of required ACKs. The
|
||||
# default is 10s.
|
||||
#broker_timeout: 10s
|
||||
|
||||
# The number of messages buffered for each Kafka broker. The default is 256.
|
||||
#channel_buffer_size: 256
|
||||
|
||||
# The keep-alive period for an active network connection. If 0s, keep-alives
|
||||
# are disabled. The default is 0 seconds.
|
||||
#keep_alive: 0
|
||||
|
||||
# Sets the output compression codec. Must be one of none, snappy and gzip. The
|
||||
# default is gzip.
|
||||
#compression: gzip
|
||||
|
||||
# The maximum permitted size of JSON-encoded messages. Bigger messages will be
|
||||
# dropped. The default value is 1000000 (bytes). This value should be equal to
|
||||
# or less than the broker's message.max.bytes.
|
||||
#max_message_bytes: 1000000
|
||||
|
||||
# The ACK reliability level required from broker. 0=no response, 1=wait for
|
||||
# local commit, -1=wait for all replicas to commit. The default is 1. Note:
|
||||
# If set to 0, no ACKs are returned by Kafka. Messages might be lost silently
|
||||
# on error.
|
||||
#required_acks: 1
|
||||
|
||||
# The number of seconds to wait for new events between two producer API calls.
|
||||
#flush_interval: 1s
|
||||
|
||||
# The configurable ClientID used for logging, debugging, and auditing
|
||||
# purposes. The default is "beats".
|
||||
#client_id: beats
|
||||
|
||||
# Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
|
||||
#ssl.enabled: true
|
||||
|
||||
# Optional SSL configuration options. SSL is off by default.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
# Configure SSL verification mode. If `none` is configured, all server hosts
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `full`.
|
||||
#ssl.verification_mode: full
|
||||
|
||||
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
|
||||
# 1.2 are enabled.
|
||||
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/etc/pki/client/cert.pem"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
# Optional passphrase for decrypting the Certificate Key.
|
||||
#ssl.key_passphrase: ''
|
||||
|
||||
# Configure cipher suites to be used for SSL connections
|
||||
#ssl.cipher_suites: []
|
||||
|
||||
# Configure curve types for ECDHE based cipher suites
|
||||
#ssl.curve_types: []
|
||||
|
||||
# Configure what types of renegotiation are supported. Valid options are
|
||||
# never, once, and freely. Default is never.
|
||||
#ssl.renegotiation: never
|
||||
|
||||
#------------------------------- Redis output ----------------------------------
|
||||
#output.redis:
|
||||
# Boolean flag to enable or disable the output module.
|
||||
#enabled: true
|
||||
|
||||
# The list of Redis servers to connect to. If load balancing is enabled, the
|
||||
# events are distributed to the servers in the list. If one server becomes
|
||||
# unreachable, the events are distributed to the reachable servers only.
|
||||
#hosts: ["localhost:6379"]
|
||||
|
||||
# The Redis port to use if hosts does not contain a port number. The default
|
||||
# is 6379.
|
||||
#port: 6379
|
||||
|
||||
# The name of the Redis list or channel the events are published to. The
|
||||
# default is icingabeat.
|
||||
#key: icingabeat
|
||||
|
||||
# The password to authenticate with. The default is no authentication.
|
||||
#password:
|
||||
|
||||
# The Redis database number where the events are published. The default is 0.
|
||||
#db: 0
|
||||
|
||||
# The Redis data type to use for publishing events. If the data type is list,
|
||||
# the Redis RPUSH command is used. If the data type is channel, the Redis
|
||||
# PUBLISH command is used. The default value is list.
|
||||
#datatype: list
|
||||
|
||||
# The number of workers to use for each host configured to publish events to
|
||||
# Redis. Use this setting along with the loadbalance option. For example, if
|
||||
# you have 2 hosts and 3 workers, in total 6 workers are started (3 for each
|
||||
# host).
|
||||
#worker: 1
|
||||
|
||||
# If set to true and multiple hosts or workers are configured, the output
|
||||
# plugin load balances published events onto all Redis hosts. If set to false,
|
||||
# the output plugin sends all events to only one host (determined at random)
|
||||
# and will switch to another host if the currently selected one becomes
|
||||
# unreachable. The default value is true.
|
||||
#loadbalance: true
|
||||
|
||||
# The Redis connection timeout in seconds. The default is 5 seconds.
|
||||
#timeout: 5s
|
||||
|
||||
# The number of times to retry publishing an event after a publishing failure.
|
||||
# After the specified number of retries, the events are typically dropped.
|
||||
# Some Beats, such as Filebeat, ignore the max_retries setting and retry until
|
||||
# all events are published. Set max_retries to a value less than 0 to retry
|
||||
# until all events are published. The default is 3.
|
||||
#max_retries: 3
|
||||
|
||||
# The maximum number of events to bulk in a single Redis request or pipeline.
|
||||
# The default is 2048.
|
||||
#bulk_max_size: 2048
|
||||
|
||||
# The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The
|
||||
# value must be a URL with a scheme of socks5://.
|
||||
#proxy_url:
|
||||
|
||||
# This option determines whether Redis hostnames are resolved locally when
|
||||
# using a proxy. The default value is false, which means that name resolution
|
||||
# occurs on the proxy server.
|
||||
#proxy_use_local_resolver: false
|
||||
|
||||
# Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
|
||||
#ssl.enabled: true
|
||||
|
||||
# Configure SSL verification mode. If `none` is configured, all server hosts
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `full`.
|
||||
#ssl.verification_mode: full
|
||||
|
||||
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
|
||||
# 1.2 are enabled.
|
||||
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
|
||||
# Optional SSL configuration options. SSL is off by default.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/etc/pki/client/cert.pem"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
# Optional passphrase for decrypting the Certificate Key.
|
||||
#ssl.key_passphrase: ''
|
||||
|
||||
# Configure cipher suites to be used for SSL connections
|
||||
#ssl.cipher_suites: []
|
||||
|
||||
# Configure curve types for ECDHE based cipher suites
|
||||
#ssl.curve_types: []
|
||||
|
||||
# Configure what types of renegotiation are supported. Valid options are
|
||||
# never, once, and freely. Default is never.
|
||||
#ssl.renegotiation: never
|
||||
|
||||
|
||||
#------------------------------- File output -----------------------------------
|
||||
#output.file:
|
||||
# Boolean flag to enable or disable the output module.
|
||||
#enabled: true
|
||||
|
||||
# Path to the directory where to save the generated files. The option is
|
||||
# mandatory.
|
||||
#path: "/tmp/icingabeat"
|
||||
|
||||
# Name of the generated files. The default is `icingabeat` and it generates
|
||||
# files: `icingabeat`, `icingabeat.1`, `icingabeat.2`, etc.
|
||||
#filename: icingabeat
|
||||
|
||||
# Maximum size in kilobytes of each file. When this size is reached, and on
|
||||
# every icingabeat restart, the files are rotated. The default value is 10240
|
||||
# kB.
|
||||
#rotate_every_kb: 10000
|
||||
|
||||
# Maximum number of files under path. When this number of files is reached,
|
||||
# the oldest file is deleted and the rest are shifted from last to first. The
|
||||
# default is 7 files.
|
||||
#number_of_files: 7
|
||||
|
||||
|
||||
#----------------------------- Console output ---------------------------------
|
||||
#output.console:
|
||||
# Boolean flag to enable or disable the output module.
|
||||
#enabled: true
|
||||
|
||||
# Pretty print json event
|
||||
#pretty: false
|
||||
|
||||
#================================= Paths ======================================
|
||||
|
||||
# The home path for the icingabeat installation. This is the default base path
|
||||
# for all other path settings and for miscellaneous files that come with the
|
||||
# distribution (for example, the sample dashboards).
|
||||
# If not set by a CLI flag or in the configuration file, the default for the
|
||||
# home path is the location of the binary.
|
||||
#path.home:
|
||||
|
||||
# The configuration path for the icingabeat installation. This is the default
|
||||
# base path for configuration files, including the main YAML configuration file
|
||||
# and the Elasticsearch template file. If not set by a CLI flag or in the
|
||||
# configuration file, the default for the configuration path is the home path.
|
||||
#path.config: ${path.home}
|
||||
|
||||
# The data path for the icingabeat installation. This is the default base path
|
||||
# for all the files in which icingabeat needs to store its data. If not set by a
|
||||
# CLI flag or in the configuration file, the default for the data path is a data
|
||||
# subdirectory inside the home path.
|
||||
#path.data: ${path.home}/data
|
||||
|
||||
# The logs path for a icingabeat installation. This is the default location for
|
||||
# the Beat's log files. If not set by a CLI flag or in the configuration file,
|
||||
# the default for the logs path is a logs subdirectory inside the home path.
|
||||
#path.logs: ${path.home}/logs
|
||||
|
||||
#============================== Dashboards =====================================
|
||||
# These settings control loading the sample dashboards to the Kibana index. Loading
|
||||
# the dashboards is disabled by default and can be enabled either by setting the
|
||||
# options here, or by using the `-setup` CLI flag.
|
||||
#dashboards.enabled: false
|
||||
|
||||
# The URL from where to download the dashboards archive. By default this URL
|
||||
# has a value which is computed based on the Beat name and version. For released
|
||||
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
|
||||
# website.
|
||||
#dashboards.url:
|
||||
|
||||
# The directory from where to read the dashboards. It is used instead of the URL
|
||||
# when it has a value.
|
||||
#dashboards.directory:
|
||||
|
||||
# The file archive (zip file) from where to read the dashboards. It is used instead
|
||||
# of the URL when it has a value.
|
||||
#dashboards.file:
|
||||
|
||||
# If this option is enabled, the snapshot URL is used instead of the default URL.
|
||||
#dashboards.snapshot: false
|
||||
|
||||
# The URL from where to download the snapshot version of the dashboards. By default
|
||||
# this has a value which is computed based on the Beat name and version.
|
||||
#dashboards.snapshot_url
|
||||
|
||||
# In case the archive contains the dashboards from multiple Beats, this lets you
|
||||
# select which one to load. You can load all the dashboards in the archive by
|
||||
# setting this to the empty string.
|
||||
#dashboards.beat: icingabeat
|
||||
|
||||
# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
|
||||
#dashboards.kibana_index: .kibana
|
||||
|
||||
# The Elasticsearch index name. This overwrites the index name defined in the
|
||||
# dashboards and index pattern. Example: testbeat-*
|
||||
#dashboards.index:
|
||||
|
||||
#================================ Logging ======================================
|
||||
# There are three options for the log output: syslog, file, stderr.
|
||||
# Under Windows systems, the log files are per default sent to the file output,
|
||||
# under all other system per default to syslog.
|
||||
|
||||
# Sets log level. The default log level is info.
|
||||
# Available log levels are: critical, error, warning, info, debug
|
||||
#logging.level: info
|
||||
|
||||
# Enable debug output for selected components. To enable all selectors use ["*"]
|
||||
# Other available selectors are "beat", "publish", "service"
|
||||
# Multiple selectors can be chained.
|
||||
#logging.selectors: [ ]
|
||||
|
||||
# Send all logging output to syslog. The default is false.
|
||||
#logging.to_syslog: true
|
||||
|
||||
# If enabled, icingabeat periodically logs its internal metrics that have changed
|
||||
# in the last period. For each metric that changed, the delta from the value at
|
||||
# the beginning of the period is logged. Also, the total values for
|
||||
# all non-zero internal metrics are logged on shutdown. The default is true.
|
||||
#logging.metrics.enabled: true
|
||||
|
||||
# The period after which to log the internal metrics. The default is 30s.
|
||||
#logging.metrics.period: 30s
|
||||
|
||||
# Logging to rotating files files. Set logging.to_files to false to disable logging to
|
||||
# files.
|
||||
logging.to_files: true
|
||||
logging.files:
|
||||
# Configure the path where the logs are written. The default is the logs directory
|
||||
# under the home path (the binary location).
|
||||
#path: /var/log/icingabeat
|
||||
|
||||
# The name of the files where the logs are written to.
|
||||
#name: icingabeat
|
||||
|
||||
# Configure log file size limit. If limit is reached, log file will be
|
||||
# automatically rotated
|
||||
#rotateeverybytes: 10485760 # = 10MB
|
||||
|
||||
# Number of rotated log files to keep. Oldest files will be deleted first.
|
||||
#keepfiles: 7
|
||||
|
||||
# The permissions mask to apply when rotating log files. The default value is 0600.
|
||||
# Must be a valid Unix-style file permissions mask expressed in octal notation.
|
||||
#permissions: 0600
|
File diff suppressed because it is too large
Load Diff
@ -72,7 +72,7 @@ icingabeat:
|
||||
# polling.
|
||||
statuspoller.interval: 60s
|
||||
|
||||
#================================ General =====================================
|
||||
# ================================== General ===================================
|
||||
|
||||
# The name of the shipper that publishes the network data. It can be used to group
|
||||
# all the transactions sent by a single shipper in the web interface.
|
||||
@ -87,11 +87,10 @@ icingabeat:
|
||||
#fields:
|
||||
# env: staging
|
||||
|
||||
|
||||
#============================== Dashboards =====================================
|
||||
# ================================= Dashboards =================================
|
||||
# These settings control loading the sample dashboards to the Kibana index. Loading
|
||||
# the dashboards is disabled by default and can be enabled either by setting the
|
||||
# options here, or by using the `-setup` CLI flag or the `setup` command.
|
||||
# options here or by using the `setup` command.
|
||||
#setup.dashboards.enabled: false
|
||||
|
||||
# The URL from where to download the dashboards archive. By default this URL
|
||||
@ -100,7 +99,7 @@ icingabeat:
|
||||
# website.
|
||||
#setup.dashboards.url:
|
||||
|
||||
#============================== Kibana =====================================
|
||||
# =================================== Kibana ===================================
|
||||
|
||||
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
|
||||
# This requires a Kibana endpoint configuration.
|
||||
@ -112,9 +111,14 @@ setup.kibana:
|
||||
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
|
||||
#host: "localhost:5601"
|
||||
|
||||
#============================= Elastic Cloud ==================================
|
||||
# Kibana Space ID
|
||||
# ID of the Kibana Space into which the dashboards should be loaded. By default,
|
||||
# the Default Space will be used.
|
||||
#space.id:
|
||||
|
||||
# These settings simplify using icingabeat with the Elastic Cloud (https://cloud.elastic.co/).
|
||||
# =============================== Elastic Cloud ================================
|
||||
|
||||
# These settings simplify using Icingabeat with the Elastic Cloud (https://cloud.elastic.co/).
|
||||
|
||||
# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
|
||||
# `setup.kibana.host` options.
|
||||
@ -125,21 +129,24 @@ setup.kibana:
|
||||
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
|
||||
#cloud.auth:
|
||||
|
||||
#================================ Outputs =====================================
|
||||
# ================================== Outputs ===================================
|
||||
|
||||
# Configure what output to use when sending the data collected by the beat.
|
||||
|
||||
#-------------------------- Elasticsearch output ------------------------------
|
||||
# ---------------------------- Elasticsearch Output ----------------------------
|
||||
output.elasticsearch:
|
||||
# Array of hosts to connect to.
|
||||
hosts: ["localhost:9200"]
|
||||
|
||||
# Optional protocol and basic auth credentials.
|
||||
# Protocol - either `http` (default) or `https`.
|
||||
#protocol: "https"
|
||||
|
||||
# Authentication credentials - either API key or username/password.
|
||||
#api_key: "id:api_key"
|
||||
#username: "elastic"
|
||||
#password: "changeme"
|
||||
|
||||
#----------------------------- Logstash output --------------------------------
|
||||
# ------------------------------ Logstash Output -------------------------------
|
||||
#output.logstash:
|
||||
# The Logstash hosts
|
||||
#hosts: ["localhost:5044"]
|
||||
@ -154,13 +161,73 @@ output.elasticsearch:
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
#================================ Logging =====================================
|
||||
# ================================= Processors =================================
|
||||
|
||||
# Configure processors to enhance or manipulate events generated by the beat.
|
||||
|
||||
processors:
|
||||
- add_host_metadata: ~
|
||||
- add_cloud_metadata: ~
|
||||
- add_docker_metadata: ~
|
||||
|
||||
|
||||
# ================================== Logging ===================================
|
||||
|
||||
# Sets log level. The default log level is info.
|
||||
# Available log levels are: critical, error, warning, info, debug
|
||||
# Available log levels are: error, warning, info, debug
|
||||
#logging.level: debug
|
||||
|
||||
# At debug level, you can selectively enable logging only for some components.
|
||||
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
|
||||
# "publish", "service".
|
||||
# "publisher", "service".
|
||||
#logging.selectors: ["*"]
|
||||
|
||||
# ============================= X-Pack Monitoring ==============================
|
||||
# Icingabeat can export internal metrics to a central Elasticsearch monitoring
|
||||
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
|
||||
# reporting is disabled by default.
|
||||
|
||||
# Set to true to enable the monitoring reporter.
|
||||
#monitoring.enabled: false
|
||||
|
||||
# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
|
||||
# Icingabeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
|
||||
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
|
||||
#monitoring.cluster_uuid:
|
||||
|
||||
# Uncomment to send the metrics to Elasticsearch. Most settings from the
|
||||
# Elasticsearch output are accepted here as well.
|
||||
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
|
||||
# Any setting that is not set is automatically inherited from the Elasticsearch
|
||||
# output configuration, so if you have the Elasticsearch output configured such
|
||||
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
|
||||
# uncomment the following line.
|
||||
#monitoring.elasticsearch:
|
||||
|
||||
# ============================== Instrumentation ===============================
|
||||
|
||||
# Instrumentation support for the icingabeat.
|
||||
#instrumentation:
|
||||
# Set to true to enable instrumentation of icingabeat.
|
||||
#enabled: false
|
||||
|
||||
# Environment in which icingabeat is running on (eg: staging, production, etc.)
|
||||
#environment: ""
|
||||
|
||||
# APM Server hosts to report instrumentation results to.
|
||||
#hosts:
|
||||
# - http://localhost:8200
|
||||
|
||||
# API Key for the APM Server(s).
|
||||
# If api_key is set then secret_token will be ignored.
|
||||
#api_key:
|
||||
|
||||
# Secret token for the APM Server(s).
|
||||
#secret_token:
|
||||
|
||||
|
||||
# ================================= Migration ==================================
|
||||
|
||||
# This allows to enable 6.7 migration aliases
|
||||
#migration.6_to_7.enabled: true
|
||||
|
||||
|
36
include/fields.go
Normal file
36
include/fields.go
Normal file
File diff suppressed because one or more lines are too long
24
include/list.go
Normal file
24
include/list.go
Normal file
@ -0,0 +1,24 @@
|
||||
// Licensed to Elasticsearch B.V. under one or more contributor
|
||||
// license agreements. See the NOTICE file distributed with
|
||||
// this work for additional information regarding copyright
|
||||
// ownership. Elasticsearch B.V. licenses this file to you under
|
||||
// the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
// Code generated by beats/dev-tools/cmd/module_include_list/module_include_list.go - DO NOT EDIT.
|
||||
|
||||
package include
|
||||
|
||||
import (
|
||||
// Import packages that need to register themselves.
|
||||
)
|
118
magefile.go
Normal file
118
magefile.go
Normal file
@ -0,0 +1,118 @@
|
||||
// +build mage
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/magefile/mage/mg"
|
||||
|
||||
devtools "github.com/elastic/beats/v7/dev-tools/mage"
|
||||
"github.com/elastic/beats/v7/dev-tools/mage/target/build"
|
||||
"github.com/elastic/beats/v7/dev-tools/mage/target/common"
|
||||
"github.com/elastic/beats/v7/dev-tools/mage/target/pkg"
|
||||
"github.com/elastic/beats/v7/dev-tools/mage/target/unittest"
|
||||
)
|
||||
|
||||
func init() {
|
||||
devtools.SetBuildVariableSources(devtools.DefaultBeatBuildVariableSources)
|
||||
|
||||
devtools.BeatDescription = "Icingabeat fetches data from the Icinga 2 API and forwards it to Elasticsearch or Logstash."
|
||||
devtools.BeatVendor = "Icinga GmbH"
|
||||
devtools.BeatURL = "https://icinga.com/docs/icingabeat"
|
||||
devtools.BeatProjectType = devtools.CommunityProject
|
||||
devtools.CrossBuildMountModcache = true
|
||||
}
|
||||
|
||||
// Package packages the Beat for distribution.
|
||||
// Use SNAPSHOT=true to build snapshots.
|
||||
// Use PLATFORMS to control the target platforms.
|
||||
func Package() {
|
||||
start := time.Now()
|
||||
defer func() { fmt.Println("package ran for", time.Since(start)) }()
|
||||
|
||||
devtools.UseCommunityBeatPackaging()
|
||||
devtools.PackageKibanaDashboardsFromBuildDir()
|
||||
|
||||
mg.Deps(Update)
|
||||
mg.Deps(build.CrossBuild, build.CrossBuildGoDaemon)
|
||||
mg.SerialDeps(devtools.Package, pkg.PackageTest)
|
||||
}
|
||||
|
||||
// Update updates the generated files (aka make update).
|
||||
func Update() {
|
||||
mg.SerialDeps(Fields, Dashboards, Config, includeList, fieldDocs)
|
||||
}
|
||||
|
||||
// Fields generates a fields.yml for the Beat.
|
||||
func Fields() error {
|
||||
return devtools.GenerateFieldsYAML()
|
||||
}
|
||||
|
||||
// Config generates both the short/reference/docker configs.
|
||||
func Config() error {
|
||||
p := devtools.DefaultConfigFileParams()
|
||||
p.Templates = append(p.Templates, "_meta/config/*.tmpl")
|
||||
return devtools.Config(devtools.AllConfigTypes, p, ".")
|
||||
}
|
||||
|
||||
func includeList() error {
|
||||
options := devtools.DefaultIncludeListOptions()
|
||||
options.ImportDirs = []string{"protos/*"}
|
||||
options.ModuleDirs = nil
|
||||
return devtools.GenerateIncludeListGo(options)
|
||||
}
|
||||
|
||||
// Clean cleans all generated files and build artifacts.
|
||||
func Clean() error {
|
||||
return devtools.Clean()
|
||||
}
|
||||
|
||||
// Check formats code, updates generated content, check for common errors, and
|
||||
// checks for any modified files.
|
||||
func Check() {
|
||||
common.Check()
|
||||
}
|
||||
|
||||
// Fmt formats source code (.go and .py) and adds license headers.
|
||||
func Fmt() {
|
||||
common.Fmt()
|
||||
}
|
||||
|
||||
// Test runs all available tests
|
||||
func Test() {
|
||||
mg.Deps(unittest.GoUnitTest)
|
||||
}
|
||||
|
||||
// Build builds the Beat binary.
|
||||
func Build() error {
|
||||
return build.Build()
|
||||
}
|
||||
|
||||
// CrossBuild cross-builds the beat for all target platforms.
|
||||
func CrossBuild() error {
|
||||
return build.CrossBuild()
|
||||
}
|
||||
|
||||
// BuildGoDaemon builds the go-daemon binary (use crossBuildGoDaemon).
|
||||
func BuildGoDaemon() error {
|
||||
return build.BuildGoDaemon()
|
||||
}
|
||||
|
||||
// GolangCrossBuild build the Beat binary inside of the golang-builder.
|
||||
// Do not use directly, use crossBuild instead.
|
||||
func GolangCrossBuild() error {
|
||||
return build.GolangCrossBuild()
|
||||
}
|
||||
|
||||
// Fields generates fields.yml and fields.go files for the Beat.
|
||||
|
||||
func fieldDocs() error {
|
||||
return devtools.Docs.FieldDocs("fields.yml")
|
||||
}
|
||||
|
||||
// Dashboards collects all the dashboards and generates index patterns.
|
||||
func Dashboards() error {
|
||||
return devtools.KibanaDashboards("protos")
|
||||
}
|
2
main.go
2
main.go
@ -4,6 +4,8 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/icinga/icingabeat/cmd"
|
||||
|
||||
_ "github.com/icinga/icingabeat/include"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
Binary file not shown.
Before Width: | Height: | Size: 180 KiB After Width: | Height: | Size: 334 KiB |
BIN
screenshots/notifications.png
Normal file
BIN
screenshots/notifications.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 273 KiB |
Binary file not shown.
Before Width: | Height: | Size: 183 KiB |
111
vendor/github.com/elastic/beats/.appveyor.yml
generated
vendored
111
vendor/github.com/elastic/beats/.appveyor.yml
generated
vendored
@ -1,111 +0,0 @@
|
||||
# Version format
|
||||
version: "{build}"
|
||||
|
||||
# Operating system (build VM template)
|
||||
os: Windows Server 2012 R2
|
||||
|
||||
# Environment variables
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
GVM_DL: https://github.com/andrewkroh/gvm/releases/download/v0.0.1/gvm-windows-amd64.exe
|
||||
PYWIN_DL: https://beats-files.s3.amazonaws.com/deps/pywin32-220.win32-py2.7.exe
|
||||
matrix:
|
||||
- PROJ: github.com\elastic\beats\metricbeat
|
||||
BEAT: metricbeat
|
||||
- PROJ: github.com\elastic\beats\filebeat
|
||||
BEAT: filebeat
|
||||
- PROJ: github.com\elastic\beats\winlogbeat
|
||||
BEAT: winlogbeat
|
||||
|
||||
# Custom clone folder (variables are not expanded here).
|
||||
clone_folder: c:\gopath\src\github.com\elastic\beats
|
||||
|
||||
# Cache files until appveyor.yml is modified.
|
||||
cache:
|
||||
- C:\ProgramData\chocolatey\bin -> .appveyor.yml
|
||||
- C:\ProgramData\chocolatey\lib -> .appveyor.yml
|
||||
- C:\Users\appveyor\.gvm -> .go-version
|
||||
- C:\Windows\System32\gvm.exe -> .appveyor.yml
|
||||
- C:\tools\mingw64 -> .appveyor.yml
|
||||
- C:\pywin_inst.exe -> .appveyor.yml
|
||||
|
||||
# Scripts that run after cloning repository
|
||||
install:
|
||||
- ps: >-
|
||||
if(!(Test-Path "C:\Windows\System32\gvm.exe")) {
|
||||
wget "$env:GVM_DL" -Outfile C:\Windows\System32\gvm.exe
|
||||
}
|
||||
- ps: gvm --format=powershell $(Get-Content .go-version) | Invoke-Expression
|
||||
# AppVeyor installed mingw is 32-bit only so install 64-bit version.
|
||||
- ps: >-
|
||||
if(!(Test-Path "C:\tools\mingw64\bin\gcc.exe")) {
|
||||
cinst mingw > mingw-install.txt
|
||||
Push-AppveyorArtifact mingw-install.txt
|
||||
}
|
||||
- set PATH=C:\tools\mingw64\bin;%PATH%
|
||||
- set PATH=%GOPATH%\bin;%PATH%
|
||||
- go install github.com/elastic/beats/vendor/github.com/pierrre/gotestcover
|
||||
- go version
|
||||
- go env
|
||||
# Download the PyWin32 installer if it is not cached.
|
||||
- ps: >-
|
||||
if(!(Test-Path "C:\pywin_inst.exe")) {
|
||||
(new-object net.webclient).DownloadFile("$env:PYWIN_DL", 'C:/pywin_inst.exe')
|
||||
}
|
||||
- set PYTHONPATH=C:\Python27
|
||||
- set PATH=%PYTHONPATH%;%PYTHONPATH%\Scripts;%PATH%
|
||||
- python --version
|
||||
- pip install six jinja2 nose nose-timer PyYAML redis elasticsearch
|
||||
- easy_install C:/pywin_inst.exe
|
||||
|
||||
# To run your custom scripts instead of automatic MSBuild
|
||||
build_script:
|
||||
# Compile
|
||||
- appveyor AddCompilationMessage "Starting Compile"
|
||||
- ps: cd $env:BEAT
|
||||
- go build
|
||||
- appveyor AddCompilationMessage "Compile Success" -FileName "%BEAT%.exe"
|
||||
|
||||
# To run your custom scripts instead of automatic tests
|
||||
test_script:
|
||||
# Unit tests
|
||||
- ps: Add-AppveyorTest "Unit Tests" -Outcome Running
|
||||
- mkdir build\coverage
|
||||
- gotestcover -race -coverprofile=build/coverage/integration.cov github.com/elastic/beats/%BEAT%/...
|
||||
- ps: Update-AppveyorTest "Unit Tests" -Outcome Passed
|
||||
# System tests
|
||||
- ps: Add-AppveyorTest "System tests" -Outcome Running
|
||||
- go test -race -c -cover -covermode=atomic -coverpkg ./...
|
||||
- ps: |
|
||||
if ($env:BEAT -eq "metricbeat") {
|
||||
cp .\_meta\fields.common.yml .\_meta\fields.generated.yml
|
||||
python .\scripts\fields_collector.py | out-file -append -encoding UTF8 -filepath .\_meta\fields.generated.yml
|
||||
}
|
||||
- ps: cd tests/system
|
||||
- nosetests --with-timer
|
||||
- ps: Update-AppveyorTest "System tests" -Outcome Passed
|
||||
|
||||
after_test:
|
||||
- ps: cd $env:GOPATH\src\$env:PROJ
|
||||
- python ..\dev-tools\aggregate_coverage.py -o build\coverage\system.cov .\build\system-tests\run
|
||||
- python ..\dev-tools\aggregate_coverage.py -o build\coverage\full.cov .\build\coverage
|
||||
- go tool cover -html=build\coverage\full.cov -o build\coverage\full.html
|
||||
- ps: Push-AppveyorArtifact build\coverage\full.cov
|
||||
- ps: Push-AppveyorArtifact build\coverage\full.html
|
||||
# Upload coverage report.
|
||||
- "SET PATH=C:\\Python34;C:\\Python34\\Scripts;%PATH%"
|
||||
- pip install codecov
|
||||
- ps: cd $env:GOPATH\src\github.com\elastic\beats
|
||||
- codecov -X gcov -f "%BEAT%\build\coverage\full.cov"
|
||||
|
||||
# Executes for both successful and failed builds
|
||||
on_finish:
|
||||
- ps: cd $env:GOPATH\src\$env:PROJ
|
||||
- 7z a -r system-tests-output.zip build\system-tests\run
|
||||
- ps: Push-AppveyorArtifact system-tests-output.zip
|
||||
|
||||
# To disable deployment
|
||||
deploy: off
|
||||
|
||||
# Notifications should only be setup using the AppVeyor UI so that
|
||||
# forks can be created without inheriting the settings.
|
31
vendor/github.com/elastic/beats/.editorconfig
generated
vendored
31
vendor/github.com/elastic/beats/.editorconfig
generated
vendored
@ -1,31 +0,0 @@
|
||||
# See: http://editorconfig.org
|
||||
root = true
|
||||
|
||||
[*]
|
||||
charset = utf-8
|
||||
end_of_line = lf
|
||||
insert_final_newline = true
|
||||
trim_trailing_whitespace = true
|
||||
|
||||
[.go]
|
||||
indent_size = 4
|
||||
indent_style = tab
|
||||
|
||||
[*.json]
|
||||
indent_size = 4
|
||||
indent_style = space
|
||||
|
||||
[*.py]
|
||||
indent_style = space
|
||||
indent_size = 4
|
||||
|
||||
[*.yml]
|
||||
indent_style = space
|
||||
indent_size = 2
|
||||
|
||||
[Makefile]
|
||||
indent_style = tab
|
||||
|
||||
[Vagrantfile]
|
||||
indent_size = 2
|
||||
indent_style = space
|
6
vendor/github.com/elastic/beats/.gitattributes
generated
vendored
6
vendor/github.com/elastic/beats/.gitattributes
generated
vendored
@ -1,6 +0,0 @@
|
||||
CHANGELOG.md merge=union
|
||||
CHANGELOG.asciidoc merge=union
|
||||
|
||||
# Keep these file types as CRLF (Windows).
|
||||
*.bat text eol=crlf
|
||||
*.cmd text eol=crlf
|
11
vendor/github.com/elastic/beats/.github/ISSUE_TEMPLATE.md
generated
vendored
11
vendor/github.com/elastic/beats/.github/ISSUE_TEMPLATE.md
generated
vendored
@ -1,11 +0,0 @@
|
||||
Please post all questions and issues on https://discuss.elastic.co/c/beats
|
||||
before opening a Github Issue. Your questions will reach a wider audience there,
|
||||
and if we confirm that there is a bug, then you can open a new issue.
|
||||
|
||||
For security vulnerabilities please only send reports to security@elastic.co.
|
||||
See https://www.elastic.co/community/security for more information.
|
||||
|
||||
For confirmed bugs, please report:
|
||||
- Version:
|
||||
- Operating System:
|
||||
- Steps to Reproduce:
|
32
vendor/github.com/elastic/beats/.gitignore
generated
vendored
32
vendor/github.com/elastic/beats/.gitignore
generated
vendored
@ -1,32 +0,0 @@
|
||||
# Directories
|
||||
/.vagrant
|
||||
/.idea
|
||||
/.vscode
|
||||
/build
|
||||
/*/data
|
||||
/*/logs
|
||||
/*/fields.yml
|
||||
/*/*.template*.json
|
||||
|
||||
# Files
|
||||
.DS_Store
|
||||
/beats.iml
|
||||
*.dev.yml
|
||||
*.generated.yml
|
||||
coverage.out
|
||||
.python-version
|
||||
beat.db
|
||||
|
||||
# Editor swap files
|
||||
*.swp
|
||||
*.swo
|
||||
*.swn
|
||||
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
*.pyc
|
1
vendor/github.com/elastic/beats/.go-version
generated
vendored
1
vendor/github.com/elastic/beats/.go-version
generated
vendored
@ -1 +0,0 @@
|
||||
1.9.2
|
13
vendor/github.com/elastic/beats/.pylintrc
generated
vendored
13
vendor/github.com/elastic/beats/.pylintrc
generated
vendored
@ -1,13 +0,0 @@
|
||||
[MESSAGES CONTROL]
|
||||
|
||||
disable=too-many-lines,too-many-public-methods,too-many-statements
|
||||
|
||||
|
||||
[BASIC]
|
||||
|
||||
method-rgx=[a-z_][a-z0-9_]{2,50}$
|
||||
|
||||
|
||||
[FORMAT]
|
||||
|
||||
max-line-length=120
|
156
vendor/github.com/elastic/beats/.travis.yml
generated
vendored
156
vendor/github.com/elastic/beats/.travis.yml
generated
vendored
@ -1,156 +0,0 @@
|
||||
sudo: required
|
||||
dist: trusty
|
||||
services:
|
||||
- docker
|
||||
|
||||
language: go
|
||||
|
||||
# Make sure project can also be built on travis for clones of the repo
|
||||
go_import_path: github.com/elastic/beats
|
||||
|
||||
env:
|
||||
global:
|
||||
# Cross-compile for amd64 only to speed up testing.
|
||||
- GOX_FLAGS="-arch amd64"
|
||||
- DOCKER_COMPOSE_VERSION=1.11.1
|
||||
- GO_VERSION="$(cat .go-version)"
|
||||
- TRAVIS_ETCD_VERSION=v3.2.8
|
||||
|
||||
jobs:
|
||||
include:
|
||||
# General checks
|
||||
- os: linux
|
||||
env: TARGETS="check"
|
||||
go: $GO_VERSION
|
||||
stage: check
|
||||
|
||||
# Filebeat
|
||||
- os: linux
|
||||
env: TARGETS="-C filebeat testsuite"
|
||||
go: $GO_VERSION
|
||||
stage: test
|
||||
- os: osx
|
||||
env: TARGETS="TEST_ENVIRONMENT=0 -C filebeat testsuite"
|
||||
go: $GO_VERSION
|
||||
stage: test
|
||||
|
||||
# Heartbeat
|
||||
- os: linux
|
||||
env: TARGETS="-C heartbeat testsuite"
|
||||
go: $GO_VERSION
|
||||
stage: test
|
||||
- os: osx
|
||||
env: TARGETS="TEST_ENVIRONMENT=0 -C heartbeat testsuite"
|
||||
go: $GO_VERSION
|
||||
stage: test
|
||||
|
||||
# Auditbeat
|
||||
- os: linux
|
||||
env: TARGETS="-C auditbeat testsuite"
|
||||
go: $GO_VERSION
|
||||
stage: test
|
||||
|
||||
# Libbeat
|
||||
- os: linux
|
||||
env: TARGETS="-C libbeat testsuite"
|
||||
go: $GO_VERSION
|
||||
stage: test
|
||||
- os: linux
|
||||
env: TARGETS="-C libbeat crosscompile"
|
||||
go: $GO_VERSION
|
||||
stage: test
|
||||
|
||||
# Metricbeat
|
||||
- os: linux
|
||||
env: TARGETS="-C metricbeat testsuite"
|
||||
go: $GO_VERSION
|
||||
stage: test
|
||||
- os: osx
|
||||
env: TARGETS="TEST_ENVIRONMENT=0 -C metricbeat testsuite"
|
||||
go: $GO_VERSION
|
||||
stage: test
|
||||
- os: linux
|
||||
env: TARGETS="-C metricbeat crosscompile"
|
||||
go: $GO_VERSION
|
||||
stage: test
|
||||
|
||||
# Packetbeat
|
||||
- os: linux
|
||||
env: TARGETS="-C packetbeat testsuite"
|
||||
go: $GO_VERSION
|
||||
stage: test
|
||||
|
||||
# Winlogbeat
|
||||
- os: linux
|
||||
env: TARGETS="-C winlogbeat crosscompile"
|
||||
go: $GO_VERSION
|
||||
stage: test
|
||||
|
||||
# Generators
|
||||
- os: linux
|
||||
env: TARGETS="-C generator/metricbeat test"
|
||||
go: $GO_VERSION
|
||||
stage: test
|
||||
- os: linux
|
||||
env: TARGETS="-C generator/beat test"
|
||||
go: $GO_VERSION
|
||||
stage: test
|
||||
|
||||
# Kubernetes
|
||||
- os: linux
|
||||
install: deploy/kubernetes/.travis/setup.sh
|
||||
env:
|
||||
- TARGETS="-C deploy/kubernetes test"
|
||||
- TRAVIS_KUBE_VERSION=v1.6.11
|
||||
stage: test
|
||||
- os: linux
|
||||
install: deploy/kubernetes/.travis/setup.sh
|
||||
env:
|
||||
- TARGETS="-C deploy/kubernetes test"
|
||||
- TRAVIS_KUBE_VERSION=v1.7.7
|
||||
stage: test
|
||||
- os: linux
|
||||
install: deploy/kubernetes/.travis/setup.sh
|
||||
env:
|
||||
- TARGETS="-C deploy/kubernetes test"
|
||||
- TRAVIS_KUBE_VERSION=v1.8.0
|
||||
stage: test
|
||||
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- python-virtualenv
|
||||
- libpcap-dev
|
||||
|
||||
before_install:
|
||||
- python --version
|
||||
- umask 022
|
||||
- chmod -R go-w $GOPATH/src/github.com/elastic/beats
|
||||
# Docker-compose installation
|
||||
- sudo rm /usr/local/bin/docker-compose || true
|
||||
- curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
|
||||
- chmod +x docker-compose
|
||||
- sudo mv docker-compose /usr/local/bin
|
||||
|
||||
# Skips installations step
|
||||
install: true
|
||||
|
||||
script:
|
||||
- make $TARGETS
|
||||
|
||||
notifications:
|
||||
slack:
|
||||
on_success: change
|
||||
on_failure: always
|
||||
on_pull_requests: false
|
||||
rooms:
|
||||
secure: "e25J5puEA31dOooTI4T+K+zrTs8XeWIGq2cgmiPt9u/g7eqWeQj1UJnVsr8GOu1RPDyuJZJHXqfrvuOYJTdHzXbwjD0JTbwwVVZMkkZW2SWZHG46HCXPiucjWXEr3hXJKBJDDpIx6VxrN7r17dejv1biQ8QuEFZfiB1H8kbH/ho="
|
||||
|
||||
after_success:
|
||||
# Copy full.cov to coverage.txt because codecov.io requires this file
|
||||
- test -f auditbeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f auditbeat/build/coverage/full.cov
|
||||
- test -f filebeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f filebeat/build/coverage/full.cov
|
||||
- test -f heartbeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f heartbeat/build/coverage/full.cov
|
||||
- test -f libbeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f libbeat/build/coverage/full.cov
|
||||
- test -f metricbeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f metricbeat/build/coverage/full.cov
|
||||
- test -f packetbeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f packetbeat/build/coverage/full.cov
|
2406
vendor/github.com/elastic/beats/CHANGELOG.asciidoc
generated
vendored
2406
vendor/github.com/elastic/beats/CHANGELOG.asciidoc
generated
vendored
File diff suppressed because it is too large
Load Diff
17
vendor/github.com/elastic/beats/CONTRIBUTING.md
generated
vendored
17
vendor/github.com/elastic/beats/CONTRIBUTING.md
generated
vendored
@ -1,17 +0,0 @@
|
||||
Please post all questions and issues first on
|
||||
[https://discuss.elastic.co/c/beats](https://discuss.elastic.co/c/beats)
|
||||
before opening a Github Issue.
|
||||
|
||||
# Contributing to Beats
|
||||
|
||||
The Beats are open source and we love to receive contributions from our
|
||||
community — you!
|
||||
|
||||
There are many ways to contribute, from writing tutorials or blog posts,
|
||||
improving the documentation, submitting bug reports and feature requests or
|
||||
writing code for implementing a whole new protocol.
|
||||
|
||||
If you want to contribute to the Beats project, you can start by reading
|
||||
the [contributing guidelines](https://www.elastic.co/guide/en/beats/devguide/current/beats-contributing.html)
|
||||
in the _Beats Developer Guide_.
|
||||
|
13
vendor/github.com/elastic/beats/LICENSE.txt
generated
vendored
13
vendor/github.com/elastic/beats/LICENSE.txt
generated
vendored
@ -1,13 +0,0 @@
|
||||
Copyright (c) 2012–2017 Elastic <http://www.elastic.co>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
155
vendor/github.com/elastic/beats/Makefile
generated
vendored
155
vendor/github.com/elastic/beats/Makefile
generated
vendored
@ -1,155 +0,0 @@
|
||||
BUILD_DIR=$(CURDIR)/build
|
||||
COVERAGE_DIR=$(BUILD_DIR)/coverage
|
||||
BEATS=packetbeat filebeat winlogbeat metricbeat heartbeat auditbeat
|
||||
PROJECTS=libbeat $(BEATS)
|
||||
PROJECTS_ENV=libbeat filebeat metricbeat
|
||||
SNAPSHOT?=yes
|
||||
PYTHON_ENV?=$(BUILD_DIR)/python-env
|
||||
VIRTUALENV_PARAMS?=
|
||||
FIND=find . -type f -not -path "*/vendor/*" -not -path "*/build/*" -not -path "*/.git/*"
|
||||
GOLINT=golint
|
||||
GOLINT_REPO=github.com/golang/lint/golint
|
||||
REVIEWDOG=reviewdog
|
||||
REVIEWDOG_OPTIONS?=-diff "git diff master"
|
||||
REVIEWDOG_REPO=github.com/haya14busa/reviewdog/cmd/reviewdog
|
||||
|
||||
# Runs complete testsuites (unit, system, integration) for all beats with coverage and race detection.
|
||||
# Also it builds the docs and the generators
|
||||
|
||||
.PHONY: testsuite
|
||||
testsuite:
|
||||
@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) testsuite || exit 1;)
|
||||
|
||||
.PHONY: setup-commit-hook
|
||||
setup-commit-hook:
|
||||
@cp script/pre_commit.sh .git/hooks/pre-commit
|
||||
@chmod 751 .git/hooks/pre-commit
|
||||
|
||||
stop-environments:
|
||||
@$(foreach var,$(PROJECTS_ENV),$(MAKE) -C $(var) stop-environment || exit 0;)
|
||||
|
||||
# Runs unit and system tests without coverage and race detection.
|
||||
.PHONY: test
|
||||
test:
|
||||
@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) test || exit 1;)
|
||||
|
||||
# Runs unit tests without coverage and race detection.
|
||||
.PHONY: unit
|
||||
unit:
|
||||
@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) unit || exit 1;)
|
||||
|
||||
.PHONY: coverage-report
|
||||
coverage-report:
|
||||
@mkdir -p $(COVERAGE_DIR)
|
||||
@echo 'mode: atomic' > ./$(COVERAGE_DIR)/full.cov
|
||||
@# Collects all coverage files and skips top line with mode
|
||||
@$(foreach var,$(PROJECTS),tail -q -n +2 ./$(var)/$(COVERAGE_DIR)/*.cov >> ./$(COVERAGE_DIR)/full.cov || true;)
|
||||
@go tool cover -html=./$(COVERAGE_DIR)/full.cov -o $(COVERAGE_DIR)/full.html
|
||||
@echo "Generated coverage report $(COVERAGE_DIR)/full.html"
|
||||
|
||||
.PHONY: update
|
||||
update: notice
|
||||
@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) update || exit 1;)
|
||||
@$(MAKE) -C deploy/kubernetes all
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
@rm -rf build
|
||||
@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) clean || exit 1;)
|
||||
@$(MAKE) -C generator clean
|
||||
|
||||
# Cleans up the vendor directory from unnecessary files
|
||||
# This should always be run after updating the dependencies
|
||||
.PHONY: clean-vendor
|
||||
clean-vendor:
|
||||
@sh script/clean_vendor.sh
|
||||
|
||||
.PHONY: check
|
||||
check: python-env
|
||||
@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) check || exit 1;)
|
||||
@# Checks also python files which are not part of the beats
|
||||
@$(FIND) -name *.py -exec $(PYTHON_ENV)/bin/autopep8 -d --max-line-length 120 {} \; | (! grep . -q) || (echo "Code differs from autopep8's style" && false)
|
||||
@# Validate that all updates were committed
|
||||
@$(MAKE) update
|
||||
@git diff | cat
|
||||
@git update-index --refresh
|
||||
@git diff-index --exit-code HEAD --
|
||||
|
||||
# Corrects spelling errors
|
||||
.PHONY: misspell
|
||||
misspell:
|
||||
go get github.com/client9/misspell
|
||||
# Ignore Kibana files (.json)
|
||||
$(FIND) -not -path "*.json" -name '*' -exec misspell -w {} \;
|
||||
|
||||
.PHONY: fmt
|
||||
fmt: python-env
|
||||
@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) fmt || exit 1;)
|
||||
@# Cleans also python files which are not part of the beats
|
||||
@$(FIND) -name "*.py" -exec $(PYTHON_ENV)/bin/autopep8 --in-place --max-line-length 120 {} \;
|
||||
|
||||
.PHONY: lint
|
||||
lint:
|
||||
@go get $(GOLINT_REPO) $(REVIEWDOG_REPO)
|
||||
$(REVIEWDOG) $(REVIEWDOG_OPTIONS)
|
||||
|
||||
# Collects all dashboards and generates dashboard folder for https://github.com/elastic/beats-dashboards/tree/master/dashboards
|
||||
.PHONY: beats-dashboards
|
||||
beats-dashboards:
|
||||
@mkdir -p build/dashboards
|
||||
@$(foreach var,$(BEATS),cp -r $(var)/_meta/kibana/ build/dashboards/$(var) || exit 1;)
|
||||
|
||||
# Builds the documents for each beat
|
||||
.PHONY: docs
|
||||
docs:
|
||||
@$(foreach var,$(PROJECTS),BUILD_DIR=${BUILD_DIR} $(MAKE) -C $(var) docs || exit 1;)
|
||||
sh ./script/build_docs.sh dev-guide github.com/elastic/beats/docs/devguide ${BUILD_DIR}
|
||||
|
||||
.PHONY: package
|
||||
package: update beats-dashboards
|
||||
@$(foreach var,$(BEATS),SNAPSHOT=$(SNAPSHOT) $(MAKE) -C $(var) package || exit 1;)
|
||||
|
||||
@echo "Start building the dashboards package"
|
||||
@mkdir -p build/upload/
|
||||
@BUILD_DIR=${BUILD_DIR} SNAPSHOT=$(SNAPSHOT) $(MAKE) -C dev-tools/packer package-dashboards ${BUILD_DIR}/upload/build_id.txt
|
||||
@mv build/upload build/dashboards-upload
|
||||
|
||||
@# Copy build files over to top build directory
|
||||
@mkdir -p build/upload/
|
||||
@$(foreach var,$(BEATS),cp -r $(var)/build/upload/ build/upload/$(var) || exit 1;)
|
||||
@cp -r build/dashboards-upload build/upload/dashboards
|
||||
@# Run tests on the generated packages.
|
||||
@go test ./dev-tools/package_test.go -files "${BUILD_DIR}/upload/*/*"
|
||||
|
||||
# Upload nightly builds to S3
|
||||
.PHONY: upload-nightlies-s3
|
||||
upload-nightlies-s3: all
|
||||
aws s3 cp --recursive --acl public-read build/upload s3://beats-nightlies
|
||||
|
||||
# Run after building to sign packages and publish to APT and YUM repos.
|
||||
.PHONY: package-upload
|
||||
upload-package:
|
||||
$(MAKE) -C dev-tools/packer deb-rpm-s3
|
||||
# You must export AWS_ACCESS_KEY=<AWS access> and export AWS_SECRET_KEY=<secret>
|
||||
# before running this make target.
|
||||
dev-tools/packer/docker/deb-rpm-s3/deb-rpm-s3.sh
|
||||
|
||||
.PHONY: release-upload
|
||||
upload-release:
|
||||
aws s3 cp --recursive --acl public-read build/upload s3://download.elasticsearch.org/beats/
|
||||
|
||||
.PHONY: notice
|
||||
notice: python-env
|
||||
@echo "Generating NOTICE"
|
||||
@$(PYTHON_ENV)/bin/python dev-tools/generate_notice.py .
|
||||
|
||||
# Sets up the virtual python environment
|
||||
.PHONY: python-env
|
||||
python-env:
|
||||
@test -d $(PYTHON_ENV) || virtualenv $(VIRTUALENV_PARAMS) $(PYTHON_ENV)
|
||||
@$(PYTHON_ENV)/bin/pip install -q --upgrade pip autopep8 six
|
||||
|
||||
# Tests if apm works with the current code
|
||||
.PHONY: test-apm
|
||||
test-apm:
|
||||
sh ./script/test_apm.sh
|
3959
vendor/github.com/elastic/beats/NOTICE.txt
generated
vendored
3959
vendor/github.com/elastic/beats/NOTICE.txt
generated
vendored
File diff suppressed because it is too large
Load Diff
78
vendor/github.com/elastic/beats/README.md
generated
vendored
78
vendor/github.com/elastic/beats/README.md
generated
vendored
@ -1,78 +0,0 @@
|
||||
[](https://travis-ci.org/elastic/beats)
|
||||
[](https://ci.appveyor.com/project/elastic-beats/beats/branch/master)
|
||||
[](http://goreportcard.com/report/elastic/beats)
|
||||
[](https://codecov.io/github/elastic/beats?branch=master)
|
||||
|
||||
# Beats - The Lightweight Shippers of the Elastic Stack
|
||||
|
||||
The [Beats](https://www.elastic.co/products/beats) are lightweight data
|
||||
shippers, written in Go, that you install on your servers to capture all sorts
|
||||
of operational data (think of logs, metrics, or network packet data). The Beats
|
||||
send the operational data to Elasticsearch, either directly or via Logstash, so
|
||||
it can be visualized with Kibana.
|
||||
|
||||
By "lightweight", we mean that Beats have a small installation footprint, use
|
||||
limited system resources, and have no runtime dependencies.
|
||||
|
||||
This repository contains
|
||||
[libbeat](https://github.com/elastic/beats/tree/master/libbeat), our Go
|
||||
framework for creating Beats, and all the officially supported Beats:
|
||||
|
||||
Beat | Description
|
||||
--- | ---
|
||||
[Filebeat](https://github.com/elastic/beats/tree/master/filebeat) | Tails and ships log files
|
||||
[Heartbeat](https://github.com/elastic/beats/tree/master/heartbeat) | Ping remote services for availability
|
||||
[Metricbeat](https://github.com/elastic/beats/tree/master/metricbeat) | Fetches sets of metrics from the operating system and services
|
||||
[Packetbeat](https://github.com/elastic/beats/tree/master/packetbeat) | Monitors the network and applications by sniffing packets
|
||||
[Winlogbeat](https://github.com/elastic/beats/tree/master/winlogbeat) | Fetches and ships Windows Event logs
|
||||
|
||||
In addition to the above Beats, which are officially supported by
|
||||
[Elastic](https://elastic.co), the
|
||||
community has created a set of other Beats that make use of libbeat but live
|
||||
outside of this Github repository. We maintain a list of community Beats
|
||||
[here](https://www.elastic.co/guide/en/beats/libbeat/master/community-beats.html).
|
||||
|
||||
## Documentation and Getting Started
|
||||
|
||||
You can find the documentation and getting started guides for each of the Beats
|
||||
on the [elastic.co site](https://www.elastic.co/guide/):
|
||||
|
||||
* [Beats platform](https://www.elastic.co/guide/en/beats/libbeat/current/index.html)
|
||||
* [Filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/index.html)
|
||||
* [Heartbeat](https://www.elastic.co/guide/en/beats/heartbeat/current/index.html)
|
||||
* [Metricbeat](https://www.elastic.co/guide/en/beats/metricbeat/current/index.html)
|
||||
* [Packetbeat](https://www.elastic.co/guide/en/beats/packetbeat/current/index.html)
|
||||
* [Winlogbeat](https://www.elastic.co/guide/en/beats/winlogbeat/current/index.html)
|
||||
|
||||
|
||||
## Getting Help
|
||||
|
||||
If you need help or hit an issue, please start by opening a topic on our
|
||||
[discuss forums](https://discuss.elastic.co/c/beats). Please note that we
|
||||
reserve GitHub tickets for confirmed bugs and enhancement requests.
|
||||
|
||||
## Downloads
|
||||
|
||||
You can download pre-compiled Beats binaries, as well as packages for the
|
||||
supported platforms, from [this page](https://www.elastic.co/downloads/beats).
|
||||
|
||||
## Contributing
|
||||
|
||||
We'd love working with you! You can help make the Beats better in many ways:
|
||||
report issues, help us reproduce issues, fix bugs, add functionality, or even
|
||||
create your own Beat.
|
||||
|
||||
Please start by reading our [CONTRIBUTING](CONTRIBUTING.md) file.
|
||||
|
||||
If you are creating a new Beat, you don't need to submit the code to this
|
||||
repository. You can simply start working in a new repository and make use of
|
||||
the libbeat packages, by following our [developer
|
||||
guide](https://www.elastic.co/guide/en/beats/libbeat/current/new-beat.html).
|
||||
After you have a working prototype, open a pull request to add your Beat to the
|
||||
list of [community
|
||||
Beats](https://github.com/elastic/beats/blob/master/libbeat/docs/communitybeats.asciidoc).
|
||||
|
||||
## Building Beats from the Source
|
||||
|
||||
See our [CONTRIBUTING](CONTRIBUTING.md) file for information about setting up your dev
|
||||
environment to build Beats from the source.
|
137
vendor/github.com/elastic/beats/Vagrantfile
generated
vendored
137
vendor/github.com/elastic/beats/Vagrantfile
generated
vendored
@ -1,137 +0,0 @@
|
||||
### Documentation
|
||||
# This is a Vagrantfile for Beats development.
|
||||
#
|
||||
# Boxes
|
||||
# =====
|
||||
#
|
||||
# win2012
|
||||
# -------
|
||||
# This box is used as a Windows development and testing environment for Beats.
|
||||
#
|
||||
# Usage and Features:
|
||||
# - Two users exist: Administrator and Vagrant. Both have the password: vagrant
|
||||
# - Use 'vagrant ssh' to open a Windows command prompt.
|
||||
# - Use 'vagrant rdp' to open a Windows Remote Deskop session. Mac users must
|
||||
# install the Microsoft Remote Desktop Client from the App Store.
|
||||
# - There is a desktop shortcut labeled "Beats Shell" that opens a command prompt
|
||||
# to C:\Gopath\src\github.com\elastic\beats where the code is mounted.
|
||||
#
|
||||
# solaris
|
||||
# -------------------
|
||||
# - Use gmake instead of make.
|
||||
#
|
||||
# freebsd and openbsd
|
||||
# -------------------
|
||||
# - Use gmake instead of make.
|
||||
# - Folder syncing doesn't work well. Consider copying the files into the box or
|
||||
# cloning the project inside the box.
|
||||
|
||||
# Provisioning for Windows PowerShell
|
||||
$winPsProvision = <<SCRIPT
|
||||
echo 'Creating github.com\elastic in the GOPATH'
|
||||
New-Item -itemtype directory -path "C:\\Gopath\\src\\github.com\\elastic" -force
|
||||
echo "Symlinking C:\\Vagrant to C:\\Gopath\\src\\github.com\\elastic"
|
||||
cmd /c mklink /d C:\\Gopath\\src\\github.com\\elastic\\beats \\\\vboxsvr\\vagrant
|
||||
|
||||
echo "Creating Beats Shell desktop shortcut"
|
||||
$WshShell = New-Object -comObject WScript.Shell
|
||||
$Shortcut = $WshShell.CreateShortcut("$Home\\Desktop\\Beats Shell.lnk")
|
||||
$Shortcut.TargetPath = "cmd.exe"
|
||||
$Shortcut.Arguments = "/K cd /d C:\\Gopath\\src\\github.com\\elastic\\beats"
|
||||
$Shortcut.Save()
|
||||
|
||||
echo "Disable automatic updates"
|
||||
$AUSettigns = (New-Object -com "Microsoft.Update.AutoUpdate").Settings
|
||||
$AUSettigns.NotificationLevel = 1
|
||||
$AUSettigns.Save()
|
||||
SCRIPT
|
||||
|
||||
# Provisioning for Unix/Linux
|
||||
$unixProvision = <<SCRIPT
|
||||
echo 'Creating github.com/elastic in the GOPATH'
|
||||
mkdir -p ~/go/src/github.com/elastic
|
||||
echo 'Symlinking /vagrant to ~/go/src/github.com/elastic'
|
||||
cd ~/go/src/github.com/elastic
|
||||
if [ -d "/vagrant" ]; then ln -s /vagrant beats; fi
|
||||
SCRIPT
|
||||
|
||||
# Linux GVM
|
||||
$linuxGvmProvision = <<SCRIPT
|
||||
mkdir -p ~/bin
|
||||
curl -sL -o ~/bin/gvm https://github.com/andrewkroh/gvm/releases/download/v0.0.1/gvm-linux-amd64
|
||||
chmod +x ~/bin/gvm
|
||||
echo 'export PATH=~/bin:$PATH' >> ~/.bash_profile
|
||||
echo 'eval "$(gvm 1.9.2)"' >> ~/.bash_profile
|
||||
SCRIPT
|
||||
|
||||
Vagrant.configure(2) do |config|
|
||||
|
||||
# Windows Server 2012 R2
|
||||
config.vm.define "win2012", primary: true do |win2012|
|
||||
|
||||
win2012.vm.box = "https://s3.amazonaws.com/beats-files/vagrant/beats-win2012-r2-virtualbox-2016-10-28_1224.box"
|
||||
win2012.vm.guest = :windows
|
||||
|
||||
# Communicator for windows boxes
|
||||
win2012.vm.communicator = "winrm"
|
||||
|
||||
# Port forward WinRM and RDP
|
||||
win2012.vm.network :forwarded_port, guest: 22, host: 2222, id: "ssh", auto_correct: true
|
||||
win2012.vm.network :forwarded_port, guest: 3389, host: 33389, id: "rdp", auto_correct: true
|
||||
win2012.vm.network :forwarded_port, guest: 5985, host: 55985, id: "winrm", auto_correct: true
|
||||
|
||||
win2012.vm.provision "shell", inline: $winPsProvision
|
||||
end
|
||||
|
||||
# Solaris 11.2
|
||||
config.vm.define "solaris", primary: true do |solaris|
|
||||
solaris.vm.box = "https://s3.amazonaws.com/beats-files/vagrant/beats-solaris-11.2-virtualbox-2016-11-02_1603.box"
|
||||
solaris.vm.network :forwarded_port, guest: 22, host: 2223, id: "ssh", auto_correct: true
|
||||
|
||||
solaris.vm.provision "shell", inline: $unixProvision, privileged: false
|
||||
end
|
||||
|
||||
# FreeBSD 11.0
|
||||
config.vm.define "freebsd", primary: true do |freebsd|
|
||||
freebsd.vm.box = "https://s3.amazonaws.com/beats-files/vagrant/beats-freebsd-11.0-virtualbox-2016-11-02_1638.box"
|
||||
freebsd.vm.network :forwarded_port, guest: 22, host: 2224, id: "ssh", auto_correct: true
|
||||
|
||||
# Must use NFS to sync a folder on FreeBSD and this requires a host-only network.
|
||||
# To enable the /vagrant folder, set disabled to false and uncomment the private_network.
|
||||
config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", :nfs => true, disabled: true
|
||||
#config.vm.network "private_network", ip: "192.168.135.18"
|
||||
|
||||
freebsd.vm.hostname = "beats-tester"
|
||||
freebsd.vm.provision "shell", inline: $unixProvision, privileged: false
|
||||
end
|
||||
|
||||
# OpenBSD 5.9-stable
|
||||
config.vm.define "openbsd", primary: true do |openbsd|
|
||||
openbsd.vm.box = "https://s3.amazonaws.com/beats-files/vagrant/beats-openbsd-5.9-current-virtualbox-2016-11-02_2007.box"
|
||||
openbsd.vm.network :forwarded_port, guest: 22, host: 2225, id: "ssh", auto_correct: true
|
||||
|
||||
config.vm.synced_folder ".", "/vagrant", type: "rsync", disabled: true
|
||||
config.vm.provider :virtualbox do |vbox|
|
||||
vbox.check_guest_additions = false
|
||||
vbox.functional_vboxsf = false
|
||||
end
|
||||
|
||||
openbsd.vm.provision "shell", inline: $unixProvision, privileged: false
|
||||
end
|
||||
|
||||
# CentOS 7
|
||||
config.vm.define "centos7", primary: true do |centos7|
|
||||
#centos7.vm.box = "http://cloud.centos.org/centos/7/vagrant/x86_64/images/CentOS-7-x86_64-Vagrant-1706_02.VirtualBox.box"
|
||||
centos7.vm.box = "ubuntu/precise64"
|
||||
centos7.vm.network :forwarded_port, guest: 22, host: 2226, id: "ssh", auto_correct: true
|
||||
|
||||
centos7.vm.provision "shell", inline: $unixProvision, privileged: false
|
||||
centos7.vm.provision "shell", inline: $linuxGvmProvision, privileged: false
|
||||
|
||||
centos7.vm.synced_folder ".", "/vagrant", type: "virtualbox"
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
10
vendor/github.com/elastic/beats/auditbeat/.gitignore
generated
vendored
10
vendor/github.com/elastic/beats/auditbeat/.gitignore
generated
vendored
@ -1,10 +0,0 @@
|
||||
build
|
||||
_meta/kibana
|
||||
_meta/beat.yml
|
||||
_meta/beat.reference.yml
|
||||
module/*/_meta/config.yml
|
||||
|
||||
/auditbeat
|
||||
/auditbeat.test
|
||||
/docs/html_docs
|
||||
|
77
vendor/github.com/elastic/beats/auditbeat/Makefile
generated
vendored
77
vendor/github.com/elastic/beats/auditbeat/Makefile
generated
vendored
@ -1,77 +0,0 @@
|
||||
BEAT_NAME=auditbeat
|
||||
BEAT_TITLE=Auditbeat
|
||||
BEAT_DESCRIPTION=Audit the activities of users and processes on your system.
|
||||
SYSTEM_TESTS=false
|
||||
TEST_ENVIRONMENT=false
|
||||
|
||||
# Path to the libbeat Makefile
|
||||
-include ../libbeat/scripts/Makefile
|
||||
|
||||
# This is called by the beats packer before building starts
|
||||
.PHONY: before-build
|
||||
before-build:
|
||||
@cat ${ES_BEATS}/auditbeat/_meta/common.p1.yml \
|
||||
<(go run scripts/generate_config.go -os windows -concat) \
|
||||
${ES_BEATS}/auditbeat/_meta/common.p2.yml \
|
||||
${ES_BEATS}/libbeat/_meta/config.yml > \
|
||||
${PREFIX}/${BEAT_NAME}-win.yml
|
||||
@cat ${ES_BEATS}/auditbeat/_meta/common.reference.yml \
|
||||
<(go run scripts/generate_config.go -os windows -concat) \
|
||||
${ES_BEATS}/libbeat/_meta/config.reference.yml > \
|
||||
${PREFIX}/${BEAT_NAME}-win.reference.yml
|
||||
|
||||
@cat ${ES_BEATS}/auditbeat/_meta/common.p1.yml \
|
||||
<(go run scripts/generate_config.go -os darwin -concat) \
|
||||
${ES_BEATS}/auditbeat/_meta/common.p2.yml \
|
||||
${ES_BEATS}/libbeat/_meta/config.yml > \
|
||||
${PREFIX}/${BEAT_NAME}-darwin.yml
|
||||
@cat ${ES_BEATS}/auditbeat/_meta/common.reference.yml \
|
||||
<(go run scripts/generate_config.go -os darwin -concat) \
|
||||
${ES_BEATS}/libbeat/_meta/config.reference.yml > \
|
||||
${PREFIX}/${BEAT_NAME}-darwin.reference.yml
|
||||
|
||||
@cat ${ES_BEATS}/auditbeat/_meta/common.p1.yml \
|
||||
<(go run scripts/generate_config.go -os linux -concat) \
|
||||
${ES_BEATS}/auditbeat/_meta/common.p2.yml \
|
||||
${ES_BEATS}/libbeat/_meta/config.yml > \
|
||||
${PREFIX}/${BEAT_NAME}-linux.yml
|
||||
@cat ${ES_BEATS}/auditbeat/_meta/common.reference.yml \
|
||||
<(go run scripts/generate_config.go -os linux -concat) \
|
||||
${ES_BEATS}/libbeat/_meta/config.reference.yml > \
|
||||
${PREFIX}/${BEAT_NAME}-linux.reference.yml
|
||||
|
||||
# Collects all dependencies and then calls update
|
||||
.PHONY: collect
|
||||
collect: fields collect-docs configs kibana
|
||||
|
||||
# Collects all module and metricset fields
|
||||
.PHONY: fields
|
||||
fields: python-env
|
||||
@mkdir -p _meta
|
||||
@cp ${ES_BEATS}/metricbeat/_meta/fields.common.yml _meta/fields.generated.yml
|
||||
@${PYTHON_ENV}/bin/python ${ES_BEATS}/metricbeat/scripts/fields_collector.py >> _meta/fields.generated.yml
|
||||
|
||||
# Collects all module configs
|
||||
.PHONY: configs
|
||||
configs: python-env
|
||||
@cat ${ES_BEATS}/auditbeat/_meta/common.p1.yml \
|
||||
<(go run scripts/generate_config.go -os linux -concat) \
|
||||
${ES_BEATS}/auditbeat/_meta/common.p2.yml > _meta/beat.yml
|
||||
@cat ${ES_BEATS}/auditbeat/_meta/common.reference.yml \
|
||||
<(go run scripts/generate_config.go -os linux -ref -concat) > _meta/beat.reference.yml
|
||||
|
||||
# Collects all module docs
|
||||
.PHONY: collect-docs
|
||||
collect-docs: python-env
|
||||
@rm -rf docs/modules
|
||||
@mkdir -p docs/modules
|
||||
@go run scripts/generate_config.go -os linux
|
||||
@${PYTHON_ENV}/bin/python ${ES_BEATS}/auditbeat/scripts/docs_collector.py --beat ${BEAT_NAME}
|
||||
|
||||
# Collects all module dashboards
|
||||
.PHONY: kibana
|
||||
kibana:
|
||||
@-rm -rf _meta/kibana/dashboard _meta/kibana/search _meta/kibana/visualization # Skip index-pattern
|
||||
@mkdir -p _meta/kibana
|
||||
@-cp -pr module/*/_meta/kibana _meta/
|
||||
|
12
vendor/github.com/elastic/beats/auditbeat/_meta/common.p1.yml
generated
vendored
12
vendor/github.com/elastic/beats/auditbeat/_meta/common.p1.yml
generated
vendored
@ -1,12 +0,0 @@
|
||||
###################### Auditbeat Configuration Example #########################
|
||||
|
||||
# This is an example configuration file highlighting only the most common
|
||||
# options. The auditbeat.reference.yml file from the same directory contains all
|
||||
# the supported options with more comments. You can use it as a reference.
|
||||
#
|
||||
# You can find the full configuration reference here:
|
||||
# https://www.elastic.co/guide/en/beats/auditbeat/index.html
|
||||
|
||||
#========================== Modules configuration =============================
|
||||
auditbeat.modules:
|
||||
|
6
vendor/github.com/elastic/beats/auditbeat/_meta/common.p2.yml
generated
vendored
6
vendor/github.com/elastic/beats/auditbeat/_meta/common.p2.yml
generated
vendored
@ -1,6 +0,0 @@
|
||||
|
||||
#==================== Elasticsearch template setting ==========================
|
||||
setup.template.settings:
|
||||
index.number_of_shards: 3
|
||||
#index.codec: best_compression
|
||||
#_source.enabled: false
|
31
vendor/github.com/elastic/beats/auditbeat/_meta/common.reference.yml
generated
vendored
31
vendor/github.com/elastic/beats/auditbeat/_meta/common.reference.yml
generated
vendored
@ -1,31 +0,0 @@
|
||||
########################## Auditbeat Configuration #############################
|
||||
|
||||
# This is a reference configuration file documenting all non-deprecated options
|
||||
# in comments. For a shorter configuration example that contains only the most
|
||||
# common options, please see auditbeat.yml in the same directory.
|
||||
#
|
||||
# You can find the full configuration reference here:
|
||||
# https://www.elastic.co/guide/en/beats/auditbeat/index.html
|
||||
|
||||
#============================ Config Reloading ================================
|
||||
|
||||
# Config reloading allows to dynamically load modules. Each file which is
|
||||
# monitored must contain one or multiple modules as a list.
|
||||
auditbeat.config.modules:
|
||||
|
||||
# Glob pattern for configuration reloading
|
||||
path: ${path.config}/conf.d/*.yml
|
||||
|
||||
# Period on which files under path should be checked for changes
|
||||
reload.period: 10s
|
||||
|
||||
# Set to true to enable config reloading
|
||||
reload.enabled: false
|
||||
|
||||
# Maximum amount of time to randomly delay the start of a metricset. Use 0 to
|
||||
# disable startup delay.
|
||||
auditbeat.max_start_delay: 10s
|
||||
|
||||
#========================== Modules configuration =============================
|
||||
auditbeat.modules:
|
||||
|
36
vendor/github.com/elastic/beats/auditbeat/_meta/fields.common.yml
generated
vendored
36
vendor/github.com/elastic/beats/auditbeat/_meta/fields.common.yml
generated
vendored
@ -1,36 +0,0 @@
|
||||
- key: common
|
||||
title: Common
|
||||
description: >
|
||||
Contains common fields available in all event types.
|
||||
fields:
|
||||
|
||||
- name: metricset.module
|
||||
description: >
|
||||
The name of the module that generated the event.
|
||||
|
||||
- name: metricset.name
|
||||
description: >
|
||||
The name of the metricset that generated the event.
|
||||
|
||||
- name: metricset.host
|
||||
description: >
|
||||
Hostname of the machine from which the metricset was collected. This
|
||||
field may not be present when the data was collected locally.
|
||||
|
||||
- name: metricset.rtt
|
||||
type: long
|
||||
required: true
|
||||
description: >
|
||||
Event round trip time in microseconds.
|
||||
|
||||
- name: metricset.namespace
|
||||
type: keyword
|
||||
description: >
|
||||
Namespace of dynamic metricsets.
|
||||
|
||||
- name: type
|
||||
required: true
|
||||
example: metricsets
|
||||
description: >
|
||||
The document type. Always set to "metricsets".
|
||||
|
872
vendor/github.com/elastic/beats/auditbeat/auditbeat.reference.yml
generated
vendored
872
vendor/github.com/elastic/beats/auditbeat/auditbeat.reference.yml
generated
vendored
@ -1,872 +0,0 @@
|
||||
########################## Auditbeat Configuration #############################
|
||||
|
||||
# This is a reference configuration file documenting all non-deprecated options
|
||||
# in comments. For a shorter configuration example that contains only the most
|
||||
# common options, please see auditbeat.yml in the same directory.
|
||||
#
|
||||
# You can find the full configuration reference here:
|
||||
# https://www.elastic.co/guide/en/beats/auditbeat/index.html
|
||||
|
||||
#============================ Config Reloading ================================
|
||||
|
||||
# Config reloading allows to dynamically load modules. Each file which is
|
||||
# monitored must contain one or multiple modules as a list.
|
||||
auditbeat.config.modules:
|
||||
|
||||
# Glob pattern for configuration reloading
|
||||
path: ${path.config}/conf.d/*.yml
|
||||
|
||||
# Period on which files under path should be checked for changes
|
||||
reload.period: 10s
|
||||
|
||||
# Set to true to enable config reloading
|
||||
reload.enabled: false
|
||||
|
||||
# Maximum amount of time to randomly delay the start of a metricset. Use 0 to
|
||||
# disable startup delay.
|
||||
auditbeat.max_start_delay: 10s
|
||||
|
||||
#========================== Modules configuration =============================
|
||||
auditbeat.modules:
|
||||
|
||||
# The kernel metricset collects events from the audit framework in the Linux
|
||||
# kernel. You need to specify audit rules for the events that you want to audit.
|
||||
- module: audit
|
||||
metricsets: [kernel]
|
||||
kernel.resolve_ids: true
|
||||
kernel.failure_mode: silent
|
||||
kernel.backlog_limit: 8196
|
||||
kernel.rate_limit: 0
|
||||
kernel.include_raw_message: false
|
||||
kernel.include_warnings: false
|
||||
kernel.audit_rules: |
|
||||
## Define audit rules here.
|
||||
## Create file watches (-w) or syscall audits (-a or -A). Uncomment these
|
||||
## examples or add your own rules.
|
||||
|
||||
## If you are on a 64 bit platform, everything should be running
|
||||
## in 64 bit mode. This rule will detect any use of the 32 bit syscalls
|
||||
## because this might be a sign of someone exploiting a hole in the 32
|
||||
## bit API.
|
||||
#-a always,exit -F arch=b32 -S all -F key=32bit-abi
|
||||
|
||||
## Executions.
|
||||
#-a always,exit -F arch=b64 -S execve,execveat -k exec
|
||||
|
||||
## External access.
|
||||
#-a always,exit -F arch=b64 -S accept,bind,connect,recvfrom -F key=external-access
|
||||
|
||||
## Identity changes.
|
||||
#-w /etc/group -p wa -k identity
|
||||
#-w /etc/passwd -p wa -k identity
|
||||
#-w /etc/gshadow -p wa -k identity
|
||||
|
||||
## Unauthorized access attempts.
|
||||
#-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access
|
||||
#-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access
|
||||
|
||||
# The file integrity metricset sends events when files are changed (created,
|
||||
# updated, deleted). The events contain file metadata and hashes.
|
||||
- module: audit
|
||||
metricsets: [file]
|
||||
file.paths:
|
||||
- /bin
|
||||
- /usr/bin
|
||||
- /sbin
|
||||
- /usr/sbin
|
||||
- /etc
|
||||
|
||||
# Scan over the configured file paths at startup and send events for new or
|
||||
# modified files since the last time Auditbeat was running.
|
||||
file.scan_at_start: true
|
||||
|
||||
# Average scan rate. This throttles the amount of CPU and I/O that Auditbeat
|
||||
# consumes at startup while scanning. Default is "50 MiB".
|
||||
file.scan_rate_per_sec: 50 MiB
|
||||
|
||||
# Limit on the size of files that will be hashed. Default is "100 MiB".
|
||||
file.max_file_size: 100 MiB
|
||||
|
||||
# Hash types to compute when the file changes. Supported types are md5, sha1,
|
||||
# sha224, sha256, sha384, sha512, sha512_224, sha512_256, sha3_224, sha3_256,
|
||||
# sha3_384 and sha3_512. Default is sha1.
|
||||
file.hash_types: [sha1]
|
||||
|
||||
|
||||
#================================ General ======================================
|
||||
|
||||
# The name of the shipper that publishes the network data. It can be used to group
|
||||
# all the transactions sent by a single shipper in the web interface.
|
||||
# If this options is not defined, the hostname is used.
|
||||
#name:
|
||||
|
||||
# The tags of the shipper are included in their own field with each
|
||||
# transaction published. Tags make it easy to group servers by different
|
||||
# logical properties.
|
||||
#tags: ["service-X", "web-tier"]
|
||||
|
||||
# Optional fields that you can specify to add additional information to the
|
||||
# output. Fields can be scalar values, arrays, dictionaries, or any nested
|
||||
# combination of these.
|
||||
#fields:
|
||||
# env: staging
|
||||
|
||||
# If this option is set to true, the custom fields are stored as top-level
|
||||
# fields in the output document instead of being grouped under a fields
|
||||
# sub-dictionary. Default is false.
|
||||
#fields_under_root: false
|
||||
|
||||
# Internal queue configuration for buffering events to be published.
|
||||
#queue:
|
||||
# Queue type by name (default 'mem')
|
||||
# The memory queue will present all available events (up to the outputs
|
||||
# bulk_max_size) to the output, the moment the output is ready to serve
|
||||
# another batch of events.
|
||||
#mem:
|
||||
# Max number of events the queue can buffer.
|
||||
#events: 4096
|
||||
|
||||
# Hints the minimum number of events stored in the queue,
|
||||
# before providing a batch of events to the outputs.
|
||||
# A value of 0 (the default) ensures events are immediately available
|
||||
# to be sent to the outputs.
|
||||
#flush.min_events: 2048
|
||||
|
||||
# Maximum duration after which events are available to the outputs,
|
||||
# if the number of events stored in the queue is < flush.min_events.
|
||||
#flush.timeout: 1s
|
||||
|
||||
# Sets the maximum number of CPUs that can be executing simultaneously. The
|
||||
# default is the number of logical CPUs available in the system.
|
||||
#max_procs:
|
||||
|
||||
#================================ Processors ===================================
|
||||
|
||||
# Processors are used to reduce the number of fields in the exported event or to
|
||||
# enhance the event with external metadata. This section defines a list of
|
||||
# processors that are applied one by one and the first one receives the initial
|
||||
# event:
|
||||
#
|
||||
# event -> filter1 -> event1 -> filter2 ->event2 ...
|
||||
#
|
||||
# The supported processors are drop_fields, drop_event, include_fields, and
|
||||
# add_cloud_metadata.
|
||||
#
|
||||
# For example, you can use the following processors to keep the fields that
|
||||
# contain CPU load percentages, but remove the fields that contain CPU ticks
|
||||
# values:
|
||||
#
|
||||
#processors:
|
||||
#- include_fields:
|
||||
# fields: ["cpu"]
|
||||
#- drop_fields:
|
||||
# fields: ["cpu.user", "cpu.system"]
|
||||
#
|
||||
# The following example drops the events that have the HTTP response code 200:
|
||||
#
|
||||
#processors:
|
||||
#- drop_event:
|
||||
# when:
|
||||
# equals:
|
||||
# http.code: 200
|
||||
#
|
||||
# The following example enriches each event with metadata from the cloud
|
||||
# provider about the host machine. It works on EC2, GCE, DigitalOcean,
|
||||
# Tencent Cloud, and Alibaba Cloud.
|
||||
#
|
||||
#processors:
|
||||
#- add_cloud_metadata: ~
|
||||
#
|
||||
# The following example enriches each event with the machine's local time zone
|
||||
# offset from UTC.
|
||||
#
|
||||
#processors:
|
||||
#- add_locale:
|
||||
# format: offset
|
||||
#
|
||||
# The following example enriches each event with docker metadata, it matches
|
||||
# given fields to an existing container id and adds info from that container:
|
||||
#
|
||||
#processors:
|
||||
#- add_docker_metadata:
|
||||
# host: "unix:///var/run/docker.sock"
|
||||
# match_fields: ["system.process.cgroup.id"]
|
||||
# # To connect to Docker over TLS you must specify a client and CA certificate.
|
||||
# #ssl:
|
||||
# # certificate_authority: "/etc/pki/root/ca.pem"
|
||||
# # certificate: "/etc/pki/client/cert.pem"
|
||||
# # key: "/etc/pki/client/cert.key"
|
||||
#
|
||||
# The following example enriches each event with docker metadata, it matches
|
||||
# container id from log path available in `source` field (by default it expects
|
||||
# it to be /var/lib/docker/containers/*/*.log).
|
||||
#
|
||||
#processors:
|
||||
#- add_docker_metadata: ~
|
||||
|
||||
#============================= Elastic Cloud ==================================
|
||||
|
||||
# These settings simplify using auditbeat with the Elastic Cloud (https://cloud.elastic.co/).
|
||||
|
||||
# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
|
||||
# `setup.kibana.host` options.
|
||||
# You can find the `cloud.id` in the Elastic Cloud web UI.
|
||||
#cloud.id:
|
||||
|
||||
# The cloud.auth setting overwrites the `output.elasticsearch.username` and
|
||||
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
|
||||
#cloud.auth:
|
||||
|
||||
#================================ Outputs ======================================
|
||||
|
||||
# Configure what output to use when sending the data collected by the beat.
|
||||
|
||||
#-------------------------- Elasticsearch output -------------------------------
|
||||
output.elasticsearch:
|
||||
# Boolean flag to enable or disable the output module.
|
||||
#enabled: true
|
||||
|
||||
# Array of hosts to connect to.
|
||||
# Scheme and port can be left out and will be set to the default (http and 9200)
|
||||
# In case you specify an additional path, the scheme is required: http://localhost:9200/path
|
||||
# IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
|
||||
hosts: ["localhost:9200"]
|
||||
|
||||
# Set gzip compression level.
|
||||
#compression_level: 0
|
||||
|
||||
# Optional protocol and basic auth credentials.
|
||||
#protocol: "https"
|
||||
#username: "elastic"
|
||||
#password: "changeme"
|
||||
|
||||
# Dictionary of HTTP parameters to pass within the url with index operations.
|
||||
#parameters:
|
||||
#param1: value1
|
||||
#param2: value2
|
||||
|
||||
# Number of workers per Elasticsearch host.
|
||||
#worker: 1
|
||||
|
||||
# Optional index name. The default is "auditbeat" plus date
|
||||
# and generates [auditbeat-]YYYY.MM.DD keys.
|
||||
# In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
|
||||
#index: "auditbeat-%{[beat.version]}-%{+yyyy.MM.dd}"
|
||||
|
||||
# Optional ingest node pipeline. By default no pipeline will be used.
|
||||
#pipeline: ""
|
||||
|
||||
# Optional HTTP Path
|
||||
#path: "/elasticsearch"
|
||||
|
||||
# Custom HTTP headers to add to each request
|
||||
#headers:
|
||||
# X-My-Header: Contents of the header
|
||||
|
||||
# Proxy server url
|
||||
#proxy_url: http://proxy:3128
|
||||
|
||||
# The number of times a particular Elasticsearch index operation is attempted. If
|
||||
# the indexing operation doesn't succeed after this many retries, the events are
|
||||
# dropped. The default is 3.
|
||||
#max_retries: 3
|
||||
|
||||
# The maximum number of events to bulk in a single Elasticsearch bulk API index request.
|
||||
# The default is 50.
|
||||
#bulk_max_size: 50
|
||||
|
||||
# Configure HTTP request timeout before failing a request to Elasticsearch.
|
||||
#timeout: 90
|
||||
|
||||
# Use SSL settings for HTTPS. Default is true.
|
||||
#ssl.enabled: true
|
||||
|
||||
# Configure SSL verification mode. If `none` is configured, all server hosts
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `full`.
|
||||
#ssl.verification_mode: full
|
||||
|
||||
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
|
||||
# 1.2 are enabled.
|
||||
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
|
||||
# SSL configuration. By default is off.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/etc/pki/client/cert.pem"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
# Optional passphrase for decrypting the Certificate Key.
|
||||
#ssl.key_passphrase: ''
|
||||
|
||||
# Configure cipher suites to be used for SSL connections
|
||||
#ssl.cipher_suites: []
|
||||
|
||||
# Configure curve types for ECDHE based cipher suites
|
||||
#ssl.curve_types: []
|
||||
|
||||
# Configure what types of renegotiation are supported. Valid options are
|
||||
# never, once, and freely. Default is never.
|
||||
#ssl.renegotiation: never
|
||||
|
||||
|
||||
#----------------------------- Logstash output ---------------------------------
|
||||
#output.logstash:
|
||||
# Boolean flag to enable or disable the output module.
|
||||
#enabled: true
|
||||
|
||||
# The Logstash hosts
|
||||
#hosts: ["localhost:5044"]
|
||||
|
||||
# Number of workers per Logstash host.
|
||||
#worker: 1
|
||||
|
||||
# Set gzip compression level.
|
||||
#compression_level: 3
|
||||
|
||||
# Optional maximum time to live for a connection to Logstash, after which the
|
||||
# connection will be re-established. A value of `0s` (the default) will
|
||||
# disable this feature.
|
||||
#
|
||||
# Not yet supported for async connections (i.e. with the "pipelining" option set)
|
||||
#ttl: 30s
|
||||
|
||||
# Optional load balance the events between the Logstash hosts. Default is false.
|
||||
#loadbalance: false
|
||||
|
||||
# Number of batches to be sent asynchronously to logstash while processing
|
||||
# new batches.
|
||||
#pipelining: 5
|
||||
|
||||
# If enabled only a subset of events in a batch of events is transferred per
|
||||
# transaction. The number of events to be sent increases up to `bulk_max_size`
|
||||
# if no error is encountered.
|
||||
#slow_start: false
|
||||
|
||||
# Optional index name. The default index name is set to auditbeat
|
||||
# in all lowercase.
|
||||
#index: 'auditbeat'
|
||||
|
||||
# SOCKS5 proxy server URL
|
||||
#proxy_url: socks5://user:password@socks5-server:2233
|
||||
|
||||
# Resolve names locally when using a proxy server. Defaults to false.
|
||||
#proxy_use_local_resolver: false
|
||||
|
||||
# Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
|
||||
#ssl.enabled: true
|
||||
|
||||
# Configure SSL verification mode. If `none` is configured, all server hosts
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `full`.
|
||||
#ssl.verification_mode: full
|
||||
|
||||
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
|
||||
# 1.2 are enabled.
|
||||
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
|
||||
# Optional SSL configuration options. SSL is off by default.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/etc/pki/client/cert.pem"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
# Optional passphrase for decrypting the Certificate Key.
|
||||
#ssl.key_passphrase: ''
|
||||
|
||||
# Configure cipher suites to be used for SSL connections
|
||||
#ssl.cipher_suites: []
|
||||
|
||||
# Configure curve types for ECDHE based cipher suites
|
||||
#ssl.curve_types: []
|
||||
|
||||
# Configure what types of renegotiation are supported. Valid options are
|
||||
# never, once, and freely. Default is never.
|
||||
#ssl.renegotiation: never
|
||||
|
||||
#------------------------------- Kafka output ----------------------------------
|
||||
#output.kafka:
|
||||
# Boolean flag to enable or disable the output module.
|
||||
#enabled: true
|
||||
|
||||
# The list of Kafka broker addresses from where to fetch the cluster metadata.
|
||||
# The cluster metadata contain the actual Kafka brokers events are published
|
||||
# to.
|
||||
#hosts: ["localhost:9092"]
|
||||
|
||||
# The Kafka topic used for produced events. The setting can be a format string
|
||||
# using any event field. To set the topic from document type use `%{[type]}`.
|
||||
#topic: beats
|
||||
|
||||
# The Kafka event key setting. Use format string to create unique event key.
|
||||
# By default no event key will be generated.
|
||||
#key: ''
|
||||
|
||||
# The Kafka event partitioning strategy. Default hashing strategy is `hash`
|
||||
# using the `output.kafka.key` setting or randomly distributes events if
|
||||
# `output.kafka.key` is not configured.
|
||||
#partition.hash:
|
||||
# If enabled, events will only be published to partitions with reachable
|
||||
# leaders. Default is false.
|
||||
#reachable_only: false
|
||||
|
||||
# Configure alternative event field names used to compute the hash value.
|
||||
# If empty `output.kafka.key` setting will be used.
|
||||
# Default value is empty list.
|
||||
#hash: []
|
||||
|
||||
# Authentication details. Password is required if username is set.
|
||||
#username: ''
|
||||
#password: ''
|
||||
|
||||
# Kafka version auditbeat is assumed to run against. Defaults to the oldest
|
||||
# supported stable version (currently version 0.8.2.0)
|
||||
#version: 0.8.2
|
||||
|
||||
# Metadata update configuration. Metadata do contain leader information
|
||||
# deciding which broker to use when publishing.
|
||||
#metadata:
|
||||
# Max metadata request retry attempts when cluster is in middle of leader
|
||||
# election. Defaults to 3 retries.
|
||||
#retry.max: 3
|
||||
|
||||
# Waiting time between retries during leader elections. Default is 250ms.
|
||||
#retry.backoff: 250ms
|
||||
|
||||
# Refresh metadata interval. Defaults to every 10 minutes.
|
||||
#refresh_frequency: 10m
|
||||
|
||||
# The number of concurrent load-balanced Kafka output workers.
|
||||
#worker: 1
|
||||
|
||||
# The number of times to retry publishing an event after a publishing failure.
|
||||
# After the specified number of retries, the events are typically dropped.
|
||||
# Some Beats, such as Filebeat, ignore the max_retries setting and retry until
|
||||
# all events are published. Set max_retries to a value less than 0 to retry
|
||||
# until all events are published. The default is 3.
|
||||
#max_retries: 3
|
||||
|
||||
# The maximum number of events to bulk in a single Kafka request. The default
|
||||
# is 2048.
|
||||
#bulk_max_size: 2048
|
||||
|
||||
# The number of seconds to wait for responses from the Kafka brokers before
|
||||
# timing out. The default is 30s.
|
||||
#timeout: 30s
|
||||
|
||||
# The maximum duration a broker will wait for number of required ACKs. The
|
||||
# default is 10s.
|
||||
#broker_timeout: 10s
|
||||
|
||||
# The number of messages buffered for each Kafka broker. The default is 256.
|
||||
#channel_buffer_size: 256
|
||||
|
||||
# The keep-alive period for an active network connection. If 0s, keep-alives
|
||||
# are disabled. The default is 0 seconds.
|
||||
#keep_alive: 0
|
||||
|
||||
# Sets the output compression codec. Must be one of none, snappy and gzip. The
|
||||
# default is gzip.
|
||||
#compression: gzip
|
||||
|
||||
# The maximum permitted size of JSON-encoded messages. Bigger messages will be
|
||||
# dropped. The default value is 1000000 (bytes). This value should be equal to
|
||||
# or less than the broker's message.max.bytes.
|
||||
#max_message_bytes: 1000000
|
||||
|
||||
# The ACK reliability level required from broker. 0=no response, 1=wait for
|
||||
# local commit, -1=wait for all replicas to commit. The default is 1. Note:
|
||||
# If set to 0, no ACKs are returned by Kafka. Messages might be lost silently
|
||||
# on error.
|
||||
#required_acks: 1
|
||||
|
||||
# The configurable ClientID used for logging, debugging, and auditing
|
||||
# purposes. The default is "beats".
|
||||
#client_id: beats
|
||||
|
||||
# Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
|
||||
#ssl.enabled: true
|
||||
|
||||
# Optional SSL configuration options. SSL is off by default.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
# Configure SSL verification mode. If `none` is configured, all server hosts
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `full`.
|
||||
#ssl.verification_mode: full
|
||||
|
||||
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
|
||||
# 1.2 are enabled.
|
||||
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/etc/pki/client/cert.pem"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
# Optional passphrase for decrypting the Certificate Key.
|
||||
#ssl.key_passphrase: ''
|
||||
|
||||
# Configure cipher suites to be used for SSL connections
|
||||
#ssl.cipher_suites: []
|
||||
|
||||
# Configure curve types for ECDHE based cipher suites
|
||||
#ssl.curve_types: []
|
||||
|
||||
# Configure what types of renegotiation are supported. Valid options are
|
||||
# never, once, and freely. Default is never.
|
||||
#ssl.renegotiation: never
|
||||
|
||||
#------------------------------- Redis output ----------------------------------
|
||||
#output.redis:
|
||||
# Boolean flag to enable or disable the output module.
|
||||
#enabled: true
|
||||
|
||||
# The list of Redis servers to connect to. If load balancing is enabled, the
|
||||
# events are distributed to the servers in the list. If one server becomes
|
||||
# unreachable, the events are distributed to the reachable servers only.
|
||||
#hosts: ["localhost:6379"]
|
||||
|
||||
# The Redis port to use if hosts does not contain a port number. The default
|
||||
# is 6379.
|
||||
#port: 6379
|
||||
|
||||
# The name of the Redis list or channel the events are published to. The
|
||||
# default is auditbeat.
|
||||
#key: auditbeat
|
||||
|
||||
# The password to authenticate with. The default is no authentication.
|
||||
#password:
|
||||
|
||||
# The Redis database number where the events are published. The default is 0.
|
||||
#db: 0
|
||||
|
||||
# The Redis data type to use for publishing events. If the data type is list,
|
||||
# the Redis RPUSH command is used. If the data type is channel, the Redis
|
||||
# PUBLISH command is used. The default value is list.
|
||||
#datatype: list
|
||||
|
||||
# The number of workers to use for each host configured to publish events to
|
||||
# Redis. Use this setting along with the loadbalance option. For example, if
|
||||
# you have 2 hosts and 3 workers, in total 6 workers are started (3 for each
|
||||
# host).
|
||||
#worker: 1
|
||||
|
||||
# If set to true and multiple hosts or workers are configured, the output
|
||||
# plugin load balances published events onto all Redis hosts. If set to false,
|
||||
# the output plugin sends all events to only one host (determined at random)
|
||||
# and will switch to another host if the currently selected one becomes
|
||||
# unreachable. The default value is true.
|
||||
#loadbalance: true
|
||||
|
||||
# The Redis connection timeout in seconds. The default is 5 seconds.
|
||||
#timeout: 5s
|
||||
|
||||
# The number of times to retry publishing an event after a publishing failure.
|
||||
# After the specified number of retries, the events are typically dropped.
|
||||
# Some Beats, such as Filebeat, ignore the max_retries setting and retry until
|
||||
# all events are published. Set max_retries to a value less than 0 to retry
|
||||
# until all events are published. The default is 3.
|
||||
#max_retries: 3
|
||||
|
||||
# The maximum number of events to bulk in a single Redis request or pipeline.
|
||||
# The default is 2048.
|
||||
#bulk_max_size: 2048
|
||||
|
||||
# The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The
|
||||
# value must be a URL with a scheme of socks5://.
|
||||
#proxy_url:
|
||||
|
||||
# This option determines whether Redis hostnames are resolved locally when
|
||||
# using a proxy. The default value is false, which means that name resolution
|
||||
# occurs on the proxy server.
|
||||
#proxy_use_local_resolver: false
|
||||
|
||||
# Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
|
||||
#ssl.enabled: true
|
||||
|
||||
# Configure SSL verification mode. If `none` is configured, all server hosts
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `full`.
|
||||
#ssl.verification_mode: full
|
||||
|
||||
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
|
||||
# 1.2 are enabled.
|
||||
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
|
||||
# Optional SSL configuration options. SSL is off by default.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/etc/pki/client/cert.pem"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
# Optional passphrase for decrypting the Certificate Key.
|
||||
#ssl.key_passphrase: ''
|
||||
|
||||
# Configure cipher suites to be used for SSL connections
|
||||
#ssl.cipher_suites: []
|
||||
|
||||
# Configure curve types for ECDHE based cipher suites
|
||||
#ssl.curve_types: []
|
||||
|
||||
# Configure what types of renegotiation are supported. Valid options are
|
||||
# never, once, and freely. Default is never.
|
||||
#ssl.renegotiation: never
|
||||
|
||||
#------------------------------- File output -----------------------------------
|
||||
#output.file:
|
||||
# Boolean flag to enable or disable the output module.
|
||||
#enabled: true
|
||||
|
||||
# Path to the directory where to save the generated files. The option is
|
||||
# mandatory.
|
||||
#path: "/tmp/auditbeat"
|
||||
|
||||
# Name of the generated files. The default is `auditbeat` and it generates
|
||||
# files: `auditbeat`, `auditbeat.1`, `auditbeat.2`, etc.
|
||||
#filename: auditbeat
|
||||
|
||||
# Maximum size in kilobytes of each file. When this size is reached, and on
|
||||
# every auditbeat restart, the files are rotated. The default value is 10240
|
||||
# kB.
|
||||
#rotate_every_kb: 10000
|
||||
|
||||
# Maximum number of files under path. When this number of files is reached,
|
||||
# the oldest file is deleted and the rest are shifted from last to first. The
|
||||
# default is 7 files.
|
||||
#number_of_files: 7
|
||||
|
||||
# Permissions to use for file creation. The default is 0600.
|
||||
#permissions: 0600
|
||||
|
||||
|
||||
#----------------------------- Console output ---------------------------------
|
||||
#output.console:
|
||||
# Boolean flag to enable or disable the output module.
|
||||
#enabled: true
|
||||
|
||||
# Pretty print json event
|
||||
#pretty: false
|
||||
|
||||
#================================= Paths ======================================
|
||||
|
||||
# The home path for the auditbeat installation. This is the default base path
|
||||
# for all other path settings and for miscellaneous files that come with the
|
||||
# distribution (for example, the sample dashboards).
|
||||
# If not set by a CLI flag or in the configuration file, the default for the
|
||||
# home path is the location of the binary.
|
||||
#path.home:
|
||||
|
||||
# The configuration path for the auditbeat installation. This is the default
|
||||
# base path for configuration files, including the main YAML configuration file
|
||||
# and the Elasticsearch template file. If not set by a CLI flag or in the
|
||||
# configuration file, the default for the configuration path is the home path.
|
||||
#path.config: ${path.home}
|
||||
|
||||
# The data path for the auditbeat installation. This is the default base path
|
||||
# for all the files in which auditbeat needs to store its data. If not set by a
|
||||
# CLI flag or in the configuration file, the default for the data path is a data
|
||||
# subdirectory inside the home path.
|
||||
#path.data: ${path.home}/data
|
||||
|
||||
# The logs path for an auditbeat installation. This is the default location for
|
||||
# the Beat's log files. If not set by a CLI flag or in the configuration file,
|
||||
# the default for the logs path is a logs subdirectory inside the home path.
|
||||
#path.logs: ${path.home}/logs
|
||||
|
||||
#============================== Dashboards =====================================
|
||||
# These settings control loading the sample dashboards to the Kibana index. Loading
|
||||
# the dashboards is disabled by default and can be enabled either by setting the
|
||||
# options here, or by using the `-setup` CLI flag or the `setup` command.
|
||||
#setup.dashboards.enabled: false
|
||||
|
||||
# The directory from where to read the dashboards. The default is the `kibana`
|
||||
# folder in the home path.
|
||||
#setup.dashboards.directory: ${path.home}/kibana
|
||||
|
||||
# The URL from where to download the dashboards archive. It is used instead of
|
||||
# the directory if it has a value.
|
||||
#setup.dashboards.url:
|
||||
|
||||
# The file archive (zip file) from where to read the dashboards. It is used instead
|
||||
# of the directory when it has a value.
|
||||
#setup.dashboards.file:
|
||||
|
||||
# In case the archive contains the dashboards from multiple Beats, this lets you
|
||||
# select which one to load. You can load all the dashboards in the archive by
|
||||
# setting this to the empty string.
|
||||
#setup.dashboards.beat: auditbeat
|
||||
|
||||
# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
|
||||
#setup.dashboards.kibana_index: .kibana
|
||||
|
||||
# The Elasticsearch index name. This overwrites the index name defined in the
|
||||
# dashboards and index pattern. Example: testbeat-*
|
||||
#setup.dashboards.index:
|
||||
|
||||
# Always use the Kibana API for loading the dashboards instead of autodetecting
|
||||
# how to install the dashboards by first querying Elasticsearch.
|
||||
#setup.dashboards.always_kibana: false
|
||||
|
||||
#============================== Template =====================================
|
||||
|
||||
# A template is used to set the mapping in Elasticsearch
|
||||
# By default template loading is enabled and the template is loaded.
|
||||
# These settings can be adjusted to load your own template or overwrite existing ones.
|
||||
|
||||
# Set to false to disable template loading.
|
||||
#setup.template.enabled: true
|
||||
|
||||
# Template name. By default the template name is "auditbeat-%{[beat.version]}"
|
||||
# The template name and pattern has to be set in case the elasticsearch index pattern is modified.
|
||||
#setup.template.name: "auditbeat-%{[beat.version]}"
|
||||
|
||||
# Template pattern. By default the template pattern is "-%{[beat.version]}-*" to apply to the default index settings.
|
||||
# The first part is the version of the beat and then -* is used to match all daily indices.
|
||||
# The template name and pattern has to be set in case the elasticsearch index pattern is modified.
|
||||
#setup.template.pattern: "auditbeat-%{[beat.version]}-*"
|
||||
|
||||
# Path to fields.yml file to generate the template
|
||||
#setup.template.fields: "${path.config}/fields.yml"
|
||||
|
||||
# Overwrite existing template
|
||||
#setup.template.overwrite: false
|
||||
|
||||
# Elasticsearch template settings
|
||||
setup.template.settings:
|
||||
|
||||
# A dictionary of settings to place into the settings.index dictionary
|
||||
# of the Elasticsearch template. For more details, please check
|
||||
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
|
||||
#index:
|
||||
#number_of_shards: 1
|
||||
#codec: best_compression
|
||||
#number_of_routing_shards: 30
|
||||
|
||||
# A dictionary of settings for the _source field. For more details, please check
|
||||
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
|
||||
#_source:
|
||||
#enabled: false
|
||||
|
||||
#============================== Kibana =====================================
|
||||
|
||||
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
|
||||
# This requires a Kibana endpoint configuration.
|
||||
setup.kibana:
|
||||
|
||||
# Kibana Host
|
||||
# Scheme and port can be left out and will be set to the default (http and 5601)
|
||||
# In case you specify an additional path, the scheme is required: http://localhost:5601/path
|
||||
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
|
||||
#host: "localhost:5601"
|
||||
|
||||
# Optional protocol and basic auth credentials.
|
||||
#protocol: "https"
|
||||
#username: "elastic"
|
||||
#password: "changeme"
|
||||
|
||||
# Optional HTTP Path
|
||||
#path: ""
|
||||
|
||||
# Use SSL settings for HTTPS. Default is true.
|
||||
#ssl.enabled: true
|
||||
|
||||
# Configure SSL verification mode. If `none` is configured, all server hosts
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `full`.
|
||||
#ssl.verification_mode: full
|
||||
|
||||
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
|
||||
# 1.2 are enabled.
|
||||
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
|
||||
# SSL configuration. By default is off.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/etc/pki/client/cert.pem"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
# Optional passphrase for decrypting the Certificate Key.
|
||||
#ssl.key_passphrase: ''
|
||||
|
||||
# Configure cipher suites to be used for SSL connections
|
||||
#ssl.cipher_suites: []
|
||||
|
||||
# Configure curve types for ECDHE based cipher suites
|
||||
#ssl.curve_types: []
|
||||
|
||||
|
||||
|
||||
#================================ Logging ======================================
|
||||
# There are three options for the log output: syslog, file, stderr.
|
||||
# Under Windows systems, the log files are per default sent to the file output,
|
||||
# under all other systems per default to syslog.
|
||||
|
||||
# Sets log level. The default log level is info.
|
||||
# Available log levels are: critical, error, warning, info, debug
|
||||
#logging.level: info
|
||||
|
||||
# Enable debug output for selected components. To enable all selectors use ["*"]
|
||||
# Other available selectors are "beat", "publish", "service"
|
||||
# Multiple selectors can be chained.
|
||||
#logging.selectors: [ ]
|
||||
|
||||
# Send all logging output to syslog. The default is false.
|
||||
#logging.to_syslog: true
|
||||
|
||||
# If enabled, auditbeat periodically logs its internal metrics that have changed
|
||||
# in the last period. For each metric that changed, the delta from the value at
|
||||
# the beginning of the period is logged. Also, the total values for
|
||||
# all non-zero internal metrics are logged on shutdown. The default is true.
|
||||
#logging.metrics.enabled: true
|
||||
|
||||
# The period after which to log the internal metrics. The default is 30s.
|
||||
#logging.metrics.period: 30s
|
||||
|
||||
# Logging to rotating files. Set logging.to_files to false to disable logging to
|
||||
# files.
|
||||
logging.to_files: true
|
||||
logging.files:
|
||||
# Configure the path where the logs are written. The default is the logs directory
|
||||
# under the home path (the binary location).
|
||||
#path: /var/log/auditbeat
|
||||
|
||||
# The name of the files where the logs are written to.
|
||||
#name: auditbeat
|
||||
|
||||
# Configure log file size limit. If limit is reached, log file will be
|
||||
# automatically rotated
|
||||
#rotateeverybytes: 10485760 # = 10MB
|
||||
|
||||
# Number of rotated log files to keep. Oldest files will be deleted first.
|
||||
#keepfiles: 7
|
||||
|
||||
# The permissions mask to apply when rotating log files. The default value is 0600.
|
||||
# Must be a valid Unix-style file permissions mask expressed in octal notation.
|
||||
#permissions: 0600
|
||||
|
||||
# Set to true to log messages in json format.
|
||||
#logging.json: false
|
149
vendor/github.com/elastic/beats/auditbeat/auditbeat.yml
generated
vendored
149
vendor/github.com/elastic/beats/auditbeat/auditbeat.yml
generated
vendored
@ -1,149 +0,0 @@
|
||||
###################### Auditbeat Configuration Example #########################
|
||||
|
||||
# This is an example configuration file highlighting only the most common
|
||||
# options. The auditbeat.reference.yml file from the same directory contains all
|
||||
# the supported options with more comments. You can use it as a reference.
|
||||
#
|
||||
# You can find the full configuration reference here:
|
||||
# https://www.elastic.co/guide/en/beats/auditbeat/index.html
|
||||
|
||||
#========================== Modules configuration =============================
|
||||
auditbeat.modules:
|
||||
|
||||
- module: audit
|
||||
metricsets: [kernel]
|
||||
kernel.audit_rules: |
|
||||
## Define audit rules here.
|
||||
## Create file watches (-w) or syscall audits (-a or -A). Uncomment these
|
||||
## examples or add your own rules.
|
||||
|
||||
## If you are on a 64 bit platform, everything should be running
|
||||
## in 64 bit mode. This rule will detect any use of the 32 bit syscalls
|
||||
## because this might be a sign of someone exploiting a hole in the 32
|
||||
## bit API.
|
||||
#-a always,exit -F arch=b32 -S all -F key=32bit-abi
|
||||
|
||||
## Executions.
|
||||
#-a always,exit -F arch=b64 -S execve,execveat -k exec
|
||||
|
||||
## External access.
|
||||
#-a always,exit -F arch=b64 -S accept,bind,connect,recvfrom -F key=external-access
|
||||
|
||||
## Identity changes.
|
||||
#-w /etc/group -p wa -k identity
|
||||
#-w /etc/passwd -p wa -k identity
|
||||
#-w /etc/gshadow -p wa -k identity
|
||||
|
||||
## Unauthorized access attempts.
|
||||
#-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access
|
||||
#-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access
|
||||
|
||||
- module: audit
|
||||
metricsets: [file]
|
||||
file.paths:
|
||||
- /bin
|
||||
- /usr/bin
|
||||
- /sbin
|
||||
- /usr/sbin
|
||||
- /etc
|
||||
|
||||
|
||||
|
||||
#==================== Elasticsearch template setting ==========================
|
||||
setup.template.settings:
|
||||
index.number_of_shards: 3
|
||||
#index.codec: best_compression
|
||||
#_source.enabled: false
|
||||
|
||||
#================================ General =====================================
|
||||
|
||||
# The name of the shipper that publishes the network data. It can be used to group
|
||||
# all the transactions sent by a single shipper in the web interface.
|
||||
#name:
|
||||
|
||||
# The tags of the shipper are included in their own field with each
|
||||
# transaction published.
|
||||
#tags: ["service-X", "web-tier"]
|
||||
|
||||
# Optional fields that you can specify to add additional information to the
|
||||
# output.
|
||||
#fields:
|
||||
# env: staging
|
||||
|
||||
|
||||
#============================== Dashboards =====================================
|
||||
# These settings control loading the sample dashboards to the Kibana index. Loading
|
||||
# the dashboards is disabled by default and can be enabled either by setting the
|
||||
# options here, or by using the `-setup` CLI flag or the `setup` command.
|
||||
#setup.dashboards.enabled: false
|
||||
|
||||
# The URL from where to download the dashboards archive. By default this URL
|
||||
# has a value which is computed based on the Beat name and version. For released
|
||||
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
|
||||
# website.
|
||||
#setup.dashboards.url:
|
||||
|
||||
#============================== Kibana =====================================
|
||||
|
||||
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
|
||||
# This requires a Kibana endpoint configuration.
|
||||
setup.kibana:
|
||||
|
||||
# Kibana Host
|
||||
# Scheme and port can be left out and will be set to the default (http and 5601)
|
||||
# In case you specify and additional path, the scheme is required: http://localhost:5601/path
|
||||
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
|
||||
#host: "localhost:5601"
|
||||
|
||||
#============================= Elastic Cloud ==================================
|
||||
|
||||
# These settings simplify using auditbeat with the Elastic Cloud (https://cloud.elastic.co/).
|
||||
|
||||
# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
|
||||
# `setup.kibana.host` options.
|
||||
# You can find the `cloud.id` in the Elastic Cloud web UI.
|
||||
#cloud.id:
|
||||
|
||||
# The cloud.auth setting overwrites the `output.elasticsearch.username` and
|
||||
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
|
||||
#cloud.auth:
|
||||
|
||||
#================================ Outputs =====================================
|
||||
|
||||
# Configure what output to use when sending the data collected by the beat.
|
||||
|
||||
#-------------------------- Elasticsearch output ------------------------------
|
||||
output.elasticsearch:
|
||||
# Array of hosts to connect to.
|
||||
hosts: ["localhost:9200"]
|
||||
|
||||
# Optional protocol and basic auth credentials.
|
||||
#protocol: "https"
|
||||
#username: "elastic"
|
||||
#password: "changeme"
|
||||
|
||||
#----------------------------- Logstash output --------------------------------
|
||||
#output.logstash:
|
||||
# The Logstash hosts
|
||||
#hosts: ["localhost:5044"]
|
||||
|
||||
# Optional SSL. By default is off.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/etc/pki/client/cert.pem"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/etc/pki/client/cert.key"
|
||||
|
||||
#================================ Logging =====================================
|
||||
|
||||
# Sets log level. The default log level is info.
|
||||
# Available log levels are: critical, error, warning, info, debug
|
||||
#logging.level: debug
|
||||
|
||||
# At debug level, you can selectively enable logging only for some components.
|
||||
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
|
||||
# "publish", "service".
|
||||
#logging.selectors: ["*"]
|
20
vendor/github.com/elastic/beats/auditbeat/cmd/root.go
generated
vendored
20
vendor/github.com/elastic/beats/auditbeat/cmd/root.go
generated
vendored
@ -1,20 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
"github.com/elastic/beats/metricbeat/beater"
|
||||
|
||||
cmd "github.com/elastic/beats/libbeat/cmd"
|
||||
)
|
||||
|
||||
// Name of the beat (auditbeat).
|
||||
const Name = "auditbeat"
|
||||
|
||||
// RootCmd for running auditbeat.
|
||||
var RootCmd *cmd.BeatsRootCmd
|
||||
|
||||
func init() {
|
||||
var runFlags = pflag.NewFlagSet(Name, pflag.ExitOnError)
|
||||
RootCmd = cmd.GenRootCmdWithRunFlags(Name, "", beater.New, runFlags)
|
||||
}
|
174
vendor/github.com/elastic/beats/auditbeat/datastore/datastore.go
generated
vendored
174
vendor/github.com/elastic/beats/auditbeat/datastore/datastore.go
generated
vendored
@ -1,174 +0,0 @@
|
||||
package datastore
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
|
||||
"github.com/elastic/beats/libbeat/paths"
|
||||
)
|
||||
|
||||
var (
|
||||
initDatastoreOnce sync.Once
|
||||
ds *boltDatastore
|
||||
)
|
||||
|
||||
// OpenBucket returns a new Bucket that stores data in {path.data}/beat.db.
|
||||
// The returned Bucket must be closed when finished to ensure all resources
|
||||
// are released.
|
||||
func OpenBucket(name string) (Bucket, error) {
|
||||
initDatastoreOnce.Do(func() {
|
||||
ds = &boltDatastore{
|
||||
path: paths.Resolve(paths.Data, "beat.db"),
|
||||
mode: 0600,
|
||||
}
|
||||
})
|
||||
|
||||
return ds.OpenBucket(name)
|
||||
}
|
||||
|
||||
// Datastore
|
||||
|
||||
type Datastore interface {
|
||||
OpenBucket(name string) (Bucket, error)
|
||||
}
|
||||
|
||||
type boltDatastore struct {
|
||||
mutex sync.Mutex
|
||||
useCount uint32
|
||||
path string
|
||||
mode os.FileMode
|
||||
db *bolt.DB
|
||||
}
|
||||
|
||||
func New(path string, mode os.FileMode) Datastore {
|
||||
return &boltDatastore{path: path, mode: mode}
|
||||
}
|
||||
|
||||
func (ds *boltDatastore) OpenBucket(bucket string) (Bucket, error) {
|
||||
ds.mutex.Lock()
|
||||
defer ds.mutex.Unlock()
|
||||
|
||||
// Initialize the Bolt DB.
|
||||
if ds.db == nil {
|
||||
var err error
|
||||
ds.db, err = bolt.Open(ds.path, ds.mode, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure the name exists.
|
||||
err := ds.db.Update(func(tx *bolt.Tx) error {
|
||||
_, err := tx.CreateBucketIfNotExists([]byte(bucket))
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &boltBucket{ds, bucket}, nil
|
||||
}
|
||||
|
||||
func (ds *boltDatastore) done() {
|
||||
ds.mutex.Lock()
|
||||
defer ds.mutex.Unlock()
|
||||
|
||||
if ds.useCount > 0 {
|
||||
ds.useCount--
|
||||
|
||||
if ds.useCount == 0 {
|
||||
ds.db.Close()
|
||||
ds.db = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Bucket
|
||||
|
||||
type Bucket interface {
|
||||
io.Closer
|
||||
Load(key string, f func(blob []byte) error) error
|
||||
Store(key string, blob []byte) error
|
||||
Delete(key string) error // Delete removes a key from the bucket. If the key does not exist then nothing is done and a nil error is returned.
|
||||
DeleteBucket() error // Deletes and closes the bucket.
|
||||
}
|
||||
|
||||
// BoltBucket is a Bucket that exposes some Bolt specific APIs.
|
||||
type BoltBucket interface {
|
||||
Bucket
|
||||
View(func(tx *bolt.Bucket) error) error
|
||||
Update(func(tx *bolt.Bucket) error) error
|
||||
}
|
||||
|
||||
type boltBucket struct {
|
||||
ds *boltDatastore
|
||||
name string
|
||||
}
|
||||
|
||||
func (b *boltBucket) Load(key string, f func(blob []byte) error) error {
|
||||
return b.ds.db.View(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte(b.name))
|
||||
|
||||
data := b.Get([]byte(key))
|
||||
if data == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return f(data)
|
||||
})
|
||||
}
|
||||
|
||||
func (b *boltBucket) Store(key string, blob []byte) error {
|
||||
return b.ds.db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte(b.name))
|
||||
return b.Put([]byte(key), blob)
|
||||
})
|
||||
}
|
||||
|
||||
func (b *boltBucket) ForEach(f func(key string, blob []byte) error) error {
|
||||
return b.ds.db.View(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte(b.name))
|
||||
|
||||
return b.ForEach(func(k, v []byte) error {
|
||||
return f(string(k), v)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func (b *boltBucket) Delete(key string) error {
|
||||
return b.ds.db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte(b.name))
|
||||
return b.Delete([]byte(key))
|
||||
})
|
||||
}
|
||||
|
||||
func (b *boltBucket) DeleteBucket() error {
|
||||
err := b.ds.db.Update(func(tx *bolt.Tx) error {
|
||||
return tx.DeleteBucket([]byte(b.name))
|
||||
})
|
||||
b.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *boltBucket) View(f func(*bolt.Bucket) error) error {
|
||||
return b.ds.db.View(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte(b.name))
|
||||
return f(b)
|
||||
})
|
||||
}
|
||||
|
||||
func (b *boltBucket) Update(f func(*bolt.Bucket) error) error {
|
||||
return b.ds.db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte(b.name))
|
||||
return f(b)
|
||||
})
|
||||
}
|
||||
|
||||
func (b *boltBucket) Close() error {
|
||||
b.ds.done()
|
||||
b.ds = nil
|
||||
return nil
|
||||
}
|
6
vendor/github.com/elastic/beats/auditbeat/docs/auditbeat-filtering.asciidoc
generated
vendored
6
vendor/github.com/elastic/beats/auditbeat/docs/auditbeat-filtering.asciidoc
generated
vendored
@ -1,6 +0,0 @@
|
||||
[[filtering-and-enhancing-data]]
|
||||
== Filter and enhance the exported data
|
||||
|
||||
include::../../libbeat/docs/processors.asciidoc[]
|
||||
|
||||
include::../../libbeat/docs/processors-using.asciidoc[]
|
7
vendor/github.com/elastic/beats/auditbeat/docs/auditbeat-general-options.asciidoc
generated
vendored
7
vendor/github.com/elastic/beats/auditbeat/docs/auditbeat-general-options.asciidoc
generated
vendored
@ -1,7 +0,0 @@
|
||||
[[configuration-general-options]]
|
||||
== Specify general settings
|
||||
|
||||
You can specify settings in the +{beatname_lc}.yml+ config file to control the
|
||||
general behavior of {beatname_uc}.
|
||||
|
||||
include::../../libbeat/docs/generalconfig.asciidoc[]
|
33
vendor/github.com/elastic/beats/auditbeat/docs/auditbeat-modules-config.asciidoc
generated
vendored
33
vendor/github.com/elastic/beats/auditbeat/docs/auditbeat-modules-config.asciidoc
generated
vendored
@ -1,33 +0,0 @@
|
||||
[id="configuration-{beatname_lc}"]
|
||||
== Specify which modules to run
|
||||
|
||||
To enable specific modules and metricsets, you add entries to the
|
||||
`auditbeat.modules` list in the +{beatname_lc}.yml+ config file. Each entry in
|
||||
the list begins with a dash (-) and is followed by settings for that module.
|
||||
|
||||
The following example shows a configuration that runs the `audit` module with
|
||||
the `kernel` and `file` metricsets enabled:
|
||||
|
||||
[source,yaml]
|
||||
----
|
||||
auditbeat.modules:
|
||||
|
||||
- module: audit
|
||||
metricsets: [kernel]
|
||||
kernel.audit_rules: |
|
||||
-w /etc/passwd -p wa -k identity
|
||||
-a always,exit -F arch=b32 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access
|
||||
|
||||
- module: audit
|
||||
metricsets: [file]
|
||||
file.paths:
|
||||
- /bin
|
||||
- /usr/bin
|
||||
- /sbin
|
||||
- /usr/sbin
|
||||
- /etc
|
||||
----
|
||||
|
||||
The configuration details vary by module. See the
|
||||
<<{beatname_lc}-modules,module documentation>> for more detail about
|
||||
configuring the available modules and metricsets.
|
77
vendor/github.com/elastic/beats/auditbeat/docs/configuring-howto.asciidoc
generated
vendored
77
vendor/github.com/elastic/beats/auditbeat/docs/configuring-howto.asciidoc
generated
vendored
@ -1,77 +0,0 @@
|
||||
[id="configuring-howto-{beatname_lc}"]
|
||||
= Configuring {beatname_uc}
|
||||
|
||||
[partintro]
|
||||
--
|
||||
Before modifying configuration settings, make sure you've completed the
|
||||
<<{beatname_lc}-configuration,configuration steps>> in the Getting Started.
|
||||
This section describes some common use cases for changing configuration options.
|
||||
|
||||
To configure {beatname_uc}, you edit the configuration file. For rpm and deb,
|
||||
you’ll find the configuration file at +/etc/{beatname_lc}/{beatname_lc}.yml+.
|
||||
There's also a full example configuration file at
|
||||
+/etc/{beatname_lc}/{beatname_lc}.reference.yml+ that shows all non-deprecated
|
||||
options. For mac and win, look in the archive that you extracted.
|
||||
|
||||
The {beatname_uc} configuration file uses http://yaml.org/[YAML] for its syntax.
|
||||
See the {libbeat}/config-file-format.html[Config File Format] section of the
|
||||
_Beats Platform Reference_ for more about the structure of the config file.
|
||||
|
||||
The following topics describe how to configure {beatname_uc}:
|
||||
|
||||
* <<configuration-{beatname_lc}>>
|
||||
* <<configuration-general-options>>
|
||||
* <<{beatname_lc}-configuration-reloading>>
|
||||
* <<configuring-internal-queue>>
|
||||
* <<configuring-output>>
|
||||
* <<configuration-ssl>>
|
||||
* <<filtering-and-enhancing-data>>
|
||||
* <<configuring-ingest-node>>
|
||||
* <<configuration-path>>
|
||||
* <<setup-kibana-endpoint>>
|
||||
* <<configuration-dashboards>>
|
||||
* <<configuration-template>>
|
||||
* <<configuration-logging>>
|
||||
* <<using-environ-vars>>
|
||||
* <<yaml-tips>>
|
||||
* <<{beatname_lc}-reference-yml>>
|
||||
|
||||
After changing configuration settings, you need to restart {beatname_uc} to
|
||||
pick up the changes.
|
||||
|
||||
--
|
||||
|
||||
include::./auditbeat-modules-config.asciidoc[]
|
||||
|
||||
include::./auditbeat-general-options.asciidoc[]
|
||||
|
||||
include::./reload-configuration.asciidoc[]
|
||||
|
||||
:allplatforms:
|
||||
include::../../libbeat/docs/queueconfig.asciidoc[]
|
||||
|
||||
include::../../libbeat/docs/outputconfig.asciidoc[]
|
||||
|
||||
include::../../libbeat/docs/shared-ssl-config.asciidoc[]
|
||||
|
||||
include::./auditbeat-filtering.asciidoc[]
|
||||
|
||||
include::../../libbeat/docs/shared-config-ingest.asciidoc[]
|
||||
|
||||
include::../../libbeat/docs/shared-path-config.asciidoc[]
|
||||
|
||||
include::../../libbeat/docs/shared-kibana-config.asciidoc[]
|
||||
|
||||
include::../../libbeat/docs/setup-config.asciidoc[]
|
||||
|
||||
include::../../libbeat/docs/loggingconfig.asciidoc[]
|
||||
|
||||
:standalone:
|
||||
include::../../libbeat/docs/shared-env-vars.asciidoc[]
|
||||
|
||||
:standalone:
|
||||
:allplatforms:
|
||||
include::../../libbeat/docs/yaml.asciidoc[]
|
||||
|
||||
include::../../libbeat/docs/reference-yml.asciidoc[]
|
||||
|
29
vendor/github.com/elastic/beats/auditbeat/docs/faq-ulimit.asciidoc
generated
vendored
29
vendor/github.com/elastic/beats/auditbeat/docs/faq-ulimit.asciidoc
generated
vendored
@ -1,29 +0,0 @@
|
||||
[float]
|
||||
[[ulimit]]
|
||||
=== {beatname_uc} fails to watch folders because too many files are open?
|
||||
|
||||
Because of the way file monitoring is implemented on macOS, you may see a
|
||||
warning similar to the following:
|
||||
|
||||
[source,shell]
|
||||
----
|
||||
eventreader_fsnotify.go:42: WARN [audit.file] Failed to watch /usr/bin: too many
|
||||
open files (check the max number of open files allowed with 'ulimit -a')
|
||||
----
|
||||
|
||||
To resolve this issue, run {beatname_uc} with the `ulimit` set to a larger
|
||||
value, for example:
|
||||
|
||||
["source","sh",subs="attributes"]
|
||||
----
|
||||
sudo sh -c 'ulimit -n 8192 && ./{beatname_uc} -e
|
||||
----
|
||||
|
||||
Or:
|
||||
|
||||
["source","sh",subs="attributes"]
|
||||
----
|
||||
sudo su
|
||||
ulimit -n 8192
|
||||
./{beatname_lc} -e
|
||||
----
|
12
vendor/github.com/elastic/beats/auditbeat/docs/faq.asciidoc
generated
vendored
12
vendor/github.com/elastic/beats/auditbeat/docs/faq.asciidoc
generated
vendored
@ -1,12 +0,0 @@
|
||||
[[faq]]
|
||||
== Frequently asked questions
|
||||
|
||||
This section contains frequently asked questions about {beatname_uc}. Also check
|
||||
out the
|
||||
https://discuss.elastic.co/c/beats/{beatname_lc}[{beatname_uc} discussion forum].
|
||||
|
||||
include::./faq-ulimit.asciidoc[]
|
||||
|
||||
include::../../libbeat/docs/faq-limit-bandwidth.asciidoc[]
|
||||
|
||||
include::../../libbeat/docs/shared-faq.asciidoc[]
|
2458
vendor/github.com/elastic/beats/auditbeat/docs/fields.asciidoc
generated
vendored
2458
vendor/github.com/elastic/beats/auditbeat/docs/fields.asciidoc
generated
vendored
File diff suppressed because it is too large
Load Diff
294
vendor/github.com/elastic/beats/auditbeat/docs/getting-started.asciidoc
generated
vendored
294
vendor/github.com/elastic/beats/auditbeat/docs/getting-started.asciidoc
generated
vendored
@ -1,294 +0,0 @@
|
||||
[id="{beatname_lc}-getting-started"]
|
||||
== Getting started with {beatname_uc}
|
||||
|
||||
To get started with your own {beatname_uc} setup, install and configure these
|
||||
related products:
|
||||
|
||||
* Elasticsearch for storage and indexing the data.
|
||||
* Kibana for the UI.
|
||||
* Logstash (optional) for inserting data into Elasticsearch.
|
||||
|
||||
See {libbeat}/getting-started.html[Getting Started with Beats and the Elastic Stack]
|
||||
for more information.
|
||||
|
||||
After installing the Elastic Stack, read the following topics to learn how to
|
||||
install, configure, and run {beatname_uc}:
|
||||
|
||||
* <<{beatname_lc}-installation>>
|
||||
* <<{beatname_lc}-configuration>>
|
||||
* <<{beatname_lc}-template>>
|
||||
* <<load-kibana-dashboards>>
|
||||
* <<{beatname_lc}-starting>>
|
||||
* <<view-kibana-dashboards>>
|
||||
* <<setup-repositories>>
|
||||
|
||||
[id="{beatname_lc}-installation"]
|
||||
=== Step 1: Install {beatname_uc}
|
||||
|
||||
You should install {beatname_uc} on all the servers you want to monitor.
|
||||
|
||||
include::../../libbeat/docs/shared-download-and-install.asciidoc[]
|
||||
|
||||
[[deb]]
|
||||
*deb:*
|
||||
|
||||
ifeval::["{release-state}"=="unreleased"]
|
||||
|
||||
Version {stack-version} of {beatname_uc} has not yet been released.
|
||||
|
||||
endif::[]
|
||||
|
||||
ifeval::["{release-state}"!="unreleased"]
|
||||
|
||||
["source","sh",subs="attributes"]
|
||||
------------------------------------------------
|
||||
curl -L -O https://artifacts.elastic.co/downloads/beats/{beatname_lc}/{beatname_lc}-{version}-amd64.deb
|
||||
sudo dpkg -i {beatname_lc}-{version}-amd64.deb
|
||||
------------------------------------------------
|
||||
|
||||
endif::[]
|
||||
|
||||
[[rpm]]
|
||||
*rpm:*
|
||||
|
||||
ifeval::["{release-state}"=="unreleased"]
|
||||
|
||||
Version {stack-version} of {beatname_uc} has not yet been released.
|
||||
|
||||
endif::[]
|
||||
|
||||
ifeval::["{release-state}"!="unreleased"]
|
||||
|
||||
["source","sh",subs="attributes"]
|
||||
------------------------------------------------
|
||||
curl -L -O https://artifacts.elastic.co/downloads/beats/{beatname_lc}/{beatname_lc}-{version}-x86_64.rpm
|
||||
sudo rpm -vi {beatname_lc}-{version}-x86_64.rpm
|
||||
------------------------------------------------
|
||||
|
||||
endif::[]
|
||||
|
||||
[[mac]]
|
||||
*mac:*
|
||||
|
||||
ifeval::["{release-state}"=="unreleased"]
|
||||
|
||||
Version {stack-version} of {beatname_uc} has not yet been released.
|
||||
|
||||
endif::[]
|
||||
|
||||
ifeval::["{release-state}"!="unreleased"]
|
||||
|
||||
["source","sh",subs="attributes"]
|
||||
------------------------------------------------
|
||||
curl -L -O https://artifacts.elastic.co/downloads/beats/{beatname_lc}/{beatname_lc}-{version}-darwin-x86_64.tar.gz
|
||||
tar xzvf {beatname_lc}-{version}-darwin-x86_64.tar.gz
|
||||
------------------------------------------------
|
||||
|
||||
endif::[]
|
||||
|
||||
[[docker]]
|
||||
*docker:*
|
||||
|
||||
ifeval::["{release-state}"=="unreleased"]
|
||||
|
||||
Version {stack-version} of {beatname_uc} has not yet been released.
|
||||
|
||||
endif::[]
|
||||
|
||||
ifeval::["{release-state}"!="unreleased"]
|
||||
|
||||
["source", "shell", subs="attributes"]
|
||||
------------------------------------------------
|
||||
docker pull {dockerimage}
|
||||
------------------------------------------------
|
||||
|
||||
endif::[]
|
||||
|
||||
[[win]]
|
||||
*win:*
|
||||
|
||||
ifeval::["{release-state}"=="unreleased"]
|
||||
|
||||
Version {stack-version} of {beatname_uc} has not yet been released.
|
||||
|
||||
endif::[]
|
||||
|
||||
ifeval::["{release-state}"!="unreleased"]
|
||||
|
||||
. Download the {beatname_uc} Windows zip file from the
|
||||
https://www.elastic.co/downloads/beats/{beatname_lc}[downloads page].
|
||||
|
||||
. Extract the contents of the zip file into `C:\Program Files`.
|
||||
|
||||
. Rename the +{beatname_lc}-<version>-windows+ directory to +{beatname_uc}+.
|
||||
|
||||
. Open a PowerShell prompt as an Administrator (right-click the PowerShell icon
|
||||
and select *Run As Administrator*). If you are running Windows XP, you may need
|
||||
to download and install PowerShell.
|
||||
|
||||
. From the PowerShell prompt, run the following commands to install {beatname_uc}
|
||||
as a Windows service:
|
||||
+
|
||||
["source","sh",subs="attributes"]
|
||||
----------------------------------------------------------------------
|
||||
PS > cd 'C:{backslash}Program Files{backslash}{beatname_uc}'
|
||||
PS C:{backslash}Program Files{backslash}{beatname_uc}> .{backslash}install-service-{beatname_lc}.ps1
|
||||
----------------------------------------------------------------------
|
||||
|
||||
NOTE: If script execution is disabled on your system, you need to set the
|
||||
execution policy for the current session to allow the script to run. For
|
||||
example: +PowerShell.exe -ExecutionPolicy UnRestricted -File
|
||||
.\install-service-{beatname_lc}.ps1+.
|
||||
|
||||
endif::[]
|
||||
|
||||
Before starting {beatname_uc}, you should look at the configuration options in the
|
||||
configuration file, for example +C:{backslash}Program Files{backslash}{beatname_uc}{backslash}{beatname_lc}.yml+.
|
||||
For more information about these options, see
|
||||
<<configuring-howto-{beatname_lc}>>.
|
||||
|
||||
[id="{beatname_lc}-configuration"]
|
||||
=== Step 2: Configure {beatname_uc}
|
||||
|
||||
include::../../libbeat/docs/shared-configuring.asciidoc[]
|
||||
|
||||
To configure {beatname_uc}:
|
||||
|
||||
. Define the {beatname_uc} modules that you want to enable. {beatname_uc} uses
|
||||
modules to collect the audit information. For each module, specify the
|
||||
metricsets that you want to collect.
|
||||
+
|
||||
The following example shows the `file` metricset configured to generate
|
||||
events whenever a file in one of the specified paths changes on disk:
|
||||
+
|
||||
["source","sh",subs="attributes"]
|
||||
-------------------------------------
|
||||
auditbeat.modules:
|
||||
|
||||
- module: audit
|
||||
metricsets: [file]
|
||||
file.paths:
|
||||
- /bin
|
||||
- /usr/bin
|
||||
- /sbin
|
||||
- /usr/sbin
|
||||
- /etc
|
||||
-------------------------------------
|
||||
+
|
||||
If you accept the default configuration without specifying additional modules,
|
||||
{beatname_uc} uses a configuration that's tailored to the operating system where
|
||||
{beatname_uc} is running.
|
||||
+
|
||||
See <<configuring-howto-{beatname_lc}>> for more details about configuring modules.
|
||||
|
||||
. If you are sending output to Elasticsearch (and not using Logstash), set the
|
||||
IP address and port where {beatname_uc} can find the Elasticsearch installation:
|
||||
+
|
||||
[source,yaml]
|
||||
----------------------------------------------------------------------
|
||||
output.elasticsearch:
|
||||
hosts: ["127.0.0.1:9200"]
|
||||
----------------------------------------------------------------------
|
||||
+
|
||||
If you are sending output to Logstash, make sure you
|
||||
<<logstash-output,Configure the Logstash output>> instead.
|
||||
|
||||
include::../../libbeat/docs/step-configure-kibana-endpoint.asciidoc[]
|
||||
|
||||
include::../../libbeat/docs/step-configure-credentials.asciidoc[]
|
||||
|
||||
include::../../libbeat/docs/step-test-config.asciidoc[]
|
||||
|
||||
include::../../libbeat/docs/step-look-at-config.asciidoc[]
|
||||
|
||||
[id="{beatname_lc}-template"]
|
||||
=== Step 3: Load the index template in Elasticsearch
|
||||
|
||||
:allplatforms:
|
||||
include::../../libbeat/docs/shared-template-load.asciidoc[]
|
||||
|
||||
[[load-kibana-dashboards]]
|
||||
=== Step 4: Set up the Kibana dashboards
|
||||
|
||||
:allplatforms:
|
||||
include::../../libbeat/docs/dashboards.asciidoc[]
|
||||
|
||||
[id="{beatname_lc}-starting"]
|
||||
=== Step 5: Start {beatname_uc}
|
||||
|
||||
Run {beatname_uc} by issuing the appropriate command for your platform. If you
|
||||
are accessing a secured Elasticsearch cluster, make sure you've configured
|
||||
credentials as described in <<{beatname_lc}-configuration>>.
|
||||
|
||||
NOTE: If you use an init.d script to start {beatname_uc} on deb or rpm, you can't
|
||||
specify command line flags (see <<command-line-options>>). To specify flags,
|
||||
start {beatname_uc} in the foreground.
|
||||
|
||||
*deb:*
|
||||
|
||||
["source","sh",subs="attributes"]
|
||||
----------------------------------------------------------------------
|
||||
sudo service {beatname_lc} start
|
||||
----------------------------------------------------------------------
|
||||
|
||||
*rpm:*
|
||||
|
||||
["source","sh",subs="attributes"]
|
||||
----------------------------------------------------------------------
|
||||
sudo service {beatname_lc} start
|
||||
----------------------------------------------------------------------
|
||||
|
||||
*mac:*
|
||||
|
||||
["source","sh",subs="attributes"]
|
||||
----------------------------------------------------------------------
|
||||
sudo chown root {beatname_lc}.yml <1>
|
||||
sudo ./{beatname_lc} -e -c {beatname_lc}.yml -d "publish"
|
||||
----------------------------------------------------------------------
|
||||
<1> To monitor system files, you'll be running {beatname_uc} as root, so you
|
||||
need to change ownership of the configuration file, or run {beatname_uc} with
|
||||
`-strict.perms=false` specified. See
|
||||
{libbeat}/config-file-permissions.html[Config File Ownership and Permissions]
|
||||
in the _Beats Platform Reference_.
|
||||
|
||||
If you see a warning about too many open files, you need to increase the
|
||||
`ulimit`. See the <<ulimit,FAQ>> for more details.
|
||||
|
||||
*win:*
|
||||
|
||||
["source","sh",subs="attributes"]
|
||||
----------------------------------------------------------------------
|
||||
PS C:{backslash}Program Files{backslash}{beatname_uc}> Start-Service {beatname_lc}
|
||||
----------------------------------------------------------------------
|
||||
|
||||
By default the log files are stored in +C:{backslash}ProgramData{backslash}{beatname_lc}{backslash}Logs+.
|
||||
|
||||
==== Test the {beatname_uc} installation
|
||||
|
||||
To verify that your server's statistics are present in Elasticsearch, issue
|
||||
the following command:
|
||||
|
||||
["source","sh",subs="attributes"]
|
||||
----------------------------------------------------------------------
|
||||
curl -XGET 'http://localhost:9200/{beatname_lc}-*/_search?pretty'
|
||||
----------------------------------------------------------------------
|
||||
|
||||
Make sure that you replace `localhost:9200` with the address of your
|
||||
Elasticsearch instance.
|
||||
|
||||
On Windows, if you don't have cURL installed, simply point your browser to the
|
||||
URL.
|
||||
|
||||
[[view-kibana-dashboards]]
|
||||
=== Step 6: View the sample Kibana dashboards
|
||||
|
||||
To make it easier for you to start auditing the activities of users and
|
||||
processes on your system, we have created example {beatname_uc} dashboards.
|
||||
You loaded the dashboards earlier when you ran the `setup` command.
|
||||
|
||||
include::../../libbeat/docs/opendashboards.asciidoc[]
|
||||
|
||||
The dashboards are provided as examples. We recommend that you
|
||||
{kibana-ref}/dashboard.html[customize] them to meet your needs.
|
||||
|
||||
image:./images/auditbeat-file-integrity-dashboard.png[Auditbeat File Integrity Dashboard]
|
BIN
vendor/github.com/elastic/beats/auditbeat/docs/images/auditbeat-file-integrity-dashboard.png
generated
vendored
BIN
vendor/github.com/elastic/beats/auditbeat/docs/images/auditbeat-file-integrity-dashboard.png
generated
vendored
Binary file not shown.
Before Width: | Height: | Size: 257 KiB |
Binary file not shown.
Before Width: | Height: | Size: 133 KiB |
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user