Update to libbeat 5.3.2

Blerim Sheqa 2017-04-28 15:17:44 +02:00
parent 0924d77ea6
commit bbd8343629
196 changed files with 1277 additions and 10088 deletions

View File

@ -8,7 +8,7 @@
// Template, add newest changes here
=== Beats version HEAD
https://github.com/elastic/beats/compare/v5.3.0...master[Check the HEAD diff]
https://github.com/elastic/beats/compare/v5.3.1...master[Check the HEAD diff]
==== Breaking changes
@ -30,11 +30,14 @@ https://github.com/elastic/beats/compare/v5.3.0...master[Check the HEAD diff]
*Affecting all Beats*
*Filebeat*
- Properly shut down crawler in case one prospector is misconfigured. {pull}4037[4037]
- Fix panic in JSON decoding code if the input line is "null". {pull}4042[4042]
*Heartbeat*
*Metricbeat*
*Packetbeat*
*Winlogbeat*
@ -84,6 +87,26 @@ https://github.com/elastic/beats/compare/v5.3.0...master[Check the HEAD diff]
////////////////////////////////////////////////////////////
[[release-notes-5.3.1]]
=== Beats version 5.3.1
https://github.com/elastic/beats/compare/v5.3.0...v5.3.1[View commits]
==== Bugfixes
*Affecting all Beats*
- Fix panic when testing regex-AST to match against date patterns. {issue}3889[3889]
*Filebeat*
- Fix modules default file permissions. {pull}3879[3879]
- Allow `-` in Apache access log byte count. {pull}3863[3863]
*Metricbeat*
- Avoid errors when some Apache status fields are missing. {issue}3074[3074]
[[release-notes-5.3.0]]
=== Beats version 5.3.0
https://github.com/elastic/beats/compare/v5.2.2...v5.3.0[View commits]
@ -111,7 +134,9 @@ https://github.com/elastic/beats/compare/v5.2.2...v5.3.0[View commits]
- Add `_id`, `_type`, `_index` and `_score` fields in the generated index pattern. {pull}3282[3282]
*Filebeat*
- Always use absolute path for event and registry. {pull}3328[3328]
- Raise an exception in case there is a syntax error in one of the configuration files available under
filebeat.config_dir. {pull}3573[3573]
- Fix empty registry file on machine crash. {issue}3537[3537]
*Metricbeat*

View File

@ -21,13 +21,15 @@ import (
)
const (
expectedConfigMode = os.FileMode(0600)
expectedConfigUID = 0
expectedConfigGID = 0
expectedConfigMode = os.FileMode(0600)
expectedManifestMode = os.FileMode(0644)
expectedConfigUID = 0
expectedConfigGID = 0
)
var (
configFilePattern = regexp.MustCompile(`.*beat\.yml`)
configFilePattern = regexp.MustCompile(`.*beat\.yml`)
manifestFilePattern = regexp.MustCompile(`manifest.yml`)
)
var (
@ -73,6 +75,9 @@ func checkRPM(t *testing.T, file string) {
}
checkConfigPermissions(t, p)
checkConfigOwner(t, p)
checkManifestPermissions(t, p)
checkManifestOwner(t, p)
}
func checkDeb(t *testing.T, file string, buf *bytes.Buffer) {
@ -84,6 +89,8 @@ func checkDeb(t *testing.T, file string, buf *bytes.Buffer) {
checkConfigPermissions(t, p)
checkConfigOwner(t, p)
checkManifestPermissions(t, p)
checkManifestOwner(t, p)
}
func checkTar(t *testing.T, file string) {
@ -95,6 +102,7 @@ func checkTar(t *testing.T, file string) {
checkConfigPermissions(t, p)
checkConfigOwner(t, p)
checkManifestPermissions(t, p)
}
func checkZip(t *testing.T, file string) {
@ -105,6 +113,7 @@ func checkZip(t *testing.T, file string) {
}
checkConfigPermissions(t, p)
checkManifestPermissions(t, p)
}
// Verify that the main configuration file is installed with a 0600 file mode.
@ -115,7 +124,7 @@ func checkConfigPermissions(t *testing.T, p *packageFile) {
mode := entry.Mode.Perm()
if expectedConfigMode != mode {
t.Errorf("file %v has wrong permissions: expected=%v actual=%v",
entry.Mode, expectedConfigMode, mode)
entry.File, expectedConfigMode, mode)
}
return
}
@ -141,6 +150,37 @@ func checkConfigOwner(t *testing.T, p *packageFile) {
})
}
// Verify that the modules manifest.yml files are installed with a 0644 file mode.
func checkManifestPermissions(t *testing.T, p *packageFile) {
t.Run(p.Name+" manifest file permissions", func(t *testing.T) {
for _, entry := range p.Contents {
if manifestFilePattern.MatchString(entry.File) {
mode := entry.Mode.Perm()
if expectedManifestMode != mode {
t.Errorf("file %v has wrong permissions: expected=%v actual=%v",
entry.File, expectedManifestMode, mode)
}
}
}
})
}
// Verify that the manifest owner is root
func checkManifestOwner(t *testing.T, p *packageFile) {
t.Run(p.Name+" manifest file owner", func(t *testing.T) {
for _, entry := range p.Contents {
if manifestFilePattern.MatchString(entry.File) {
if expectedConfigUID != entry.UID {
t.Errorf("file %v should be owned by user %v, owner=%v", entry.File, expectedConfigGID, entry.UID)
}
if expectedConfigGID != entry.GID {
t.Errorf("file %v should be owned by group %v, group=%v", entry.File, expectedConfigGID, entry.GID)
}
}
}
})
}
// Helpers
type packageFile struct {

View File

@ -1 +1 @@
version: "1.1.0"
version: "5.3.2"

View File

@ -25,17 +25,24 @@ filebeat.modules:
# can be added under this section.
#prospector:
# Authorization logs
#auth:
#enabled: true
# Set custom paths for the log files. If left empty,
# Filebeat will choose the paths depending on your OS.
#var.paths:
# Prospector configuration (advanced). Any prospector configuration option
# can be added under this section.
#prospector:
#------------------------------- Apache2 Module ------------------------------
#- module: apache2
# Access logs
#access:
#enabled: true
# Ingest Node pipeline to use. Options are `with_plugins` (default)
# and `no_plugins`. Use `no_plugins` if you don't have the geoip or
# the user agent Node ingest plugins installed.
#var.pipeline: with_plugins
# Set custom paths for the log files. If left empty,
# Filebeat will choose the paths depending on your OS.
#var.paths:
@ -139,11 +146,6 @@ filebeat.modules:
#access:
#enabled: true
# Ingest Node pipeline to use. Options are `with_plugins` (default)
# and `no_plugins`. Use `no_plugins` if you don't have the geoip or
# the user agent Node ingest plugins installed.
#var.pipeline: with_plugins
# Set custom paths for the log files. If left empty,
# Filebeat will choose the paths depending on your OS.
#var.paths:
@ -183,6 +185,9 @@ filebeat.prospectors:
#------------------------------ Log prospector --------------------------------
- input_type: log
# Change to true to enable this prospector configuration.
enabled: false
# Paths that should be crawled and fetched. Glob based paths.
# To fetch all ".log" files from a specific level of subdirectories
# /var/log/*/*.log can be used.
@ -249,6 +254,11 @@ filebeat.prospectors:
# This is especially useful for multiline log messages which can get large.
#max_bytes: 10485760
### Recursive glob configuration
# Expand "**" patterns into regular glob patterns.
#recursive_glob.enabled: true
### JSON configuration
# Decode JSON options. Enable this if your logs are structured in JSON.
@ -399,3 +409,10 @@ filebeat.prospectors:
# How long filebeat waits on shutdown for the publisher to finish.
# Default is 0, not waiting.
#filebeat.shutdown_timeout: 0
# Enable filebeat config reloading
#filebeat.config.prospectors:
#enabled: false
#path: configs/*.yml
#reload.enabled: true
#reload.period: 10s

View File

@ -13,22 +13,85 @@ filebeat.modules:
#------------------------------- System Module -------------------------------
#- module: system
# Syslog
#syslog:
#enabled: true
# Set custom paths for the log files. If left empty,
# Filebeat will choose the paths depending on your OS.
#var.paths:
# Authorization logs
#auth:
#enabled: true
# Set custom paths for the log files. If left empty,
# Filebeat will choose the paths depending on your OS.
#var.paths:
#------------------------------- Apache2 Module ------------------------------
#- module: apache2
# Access logs
#access:
#enabled: true
# Set custom paths for the log files. If left empty,
# Filebeat will choose the paths depending on your OS.
#var.paths:
# Error logs
#error:
#enabled: true
# Set custom paths for the log files. If left empty,
# Filebeat will choose the paths depending on your OS.
#var.paths:
#------------------------------- Auditd Module -------------------------------
#- module: auditd
#log:
#enabled: true
# Set custom paths for the log files. If left empty,
# Filebeat will choose the paths depending on your OS.
#var.paths:
#------------------------------- Icinga Module -------------------------------
#- module: icinga
#-------------------------------- MySQL Module -------------------------------
#- module: mysql
# Error logs
#error:
#enabled: true
# Set custom paths for the log files. If left empty,
# Filebeat will choose the paths depending on your OS.
#var.paths:
# Slow logs
#slowlog:
#enabled: true
# Set custom paths for the log files. If left empty,
# Filebeat will choose the paths depending on your OS.
#var.paths:
#-------------------------------- Nginx Module -------------------------------
#- module: nginx
# Ingest Node pipeline to use. Options are `with_plugins` (default)
# and `no_plugins`. Use `no_plugins` if you don't have the geoip or
# the user agent Node ingest plugins installed.
#access.var.pipeline: with_plugins
# Access logs
#access:
#enabled: true
# Set custom paths for the log files. If left empty,
# Filebeat will choose the paths depending on your OS.
#var.paths:
# Error logs
#error:
#enabled: true
# Set custom paths for the log files. If left empty,
# Filebeat will choose the paths depending on your OS.
#var.paths:
# For more available modules and options, please see the filebeat.full.yml sample
@ -44,6 +107,9 @@ filebeat.prospectors:
- input_type: log
# Change to true to enable this prospector configuration.
enabled: false
# Paths that should be crawled and fetched. Glob based paths.
paths:
- /var/log/*.log

View File

@ -177,6 +177,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error {
err = crawler.Start(registrar, config.ProspectorReload)
if err != nil {
crawler.Stop()
return err
}

View File

@ -76,7 +76,7 @@ func (c *Crawler) startProspector(config *common.Config, states []file.State) er
err = p.LoadStates(states)
if err != nil {
return fmt.Errorf("error loading states for propsector %v: %v", p.ID(), err)
return fmt.Errorf("error loading states for prospector %v: %v", p.ID(), err)
}
c.prospectors[p.ID()] = p

View File

@ -11,6 +11,10 @@ To configure {beatname_uc}, you edit the configuration file. For rpm and deb, yo
+/etc/{beatname_lc}/{beatname_lc}.yml+. There's also a full example configuration file at
+/etc/{beatname_lc}/{beatname_lc}.full.yml+ that shows all non-deprecated options. For mac and win, look in the archive that you extracted.
See the
{libbeat}/config-file-format.html[Config File Format] section of the
_Beats Platform Reference_ for more about the structure of the config file.
The following topics describe how to configure Filebeat:
* <<filebeat-configuration-details>>

View File

@ -33,7 +33,7 @@ it's publishing events successfully:
[float]
[[open-file-handlers]]
== Too many open file handlers?
=== Too many open file handlers?
Filebeat keeps the file handler open in case it reaches the end of a file so that it can read new log lines in near real time. If Filebeat is harvesting a large number of files, the number of open files can become an issue. In most environments, the number of files that are actively updated is low. The `close_inactive` configuration option should be set accordingly to close files that are no longer active.

View File

@ -11,131 +11,20 @@ See {libbeat}/getting-started.html[Getting Started with Beats and the Elastic St
After installing the Elastic Stack, read the following topics to learn how to install, configure, and run Filebeat:
* <<filebeat-modules-quickstart>>
* <<filebeat-installation>>
* <<filebeat-configuration>>
* <<config-filebeat-logstash>>
* <<filebeat-template>>
* <<filebeat-starting>>
* <<filebeat-index-pattern>>
* <<filebeat-modules-quickstart>>
* <<filebeat-command-line>>
* <<directory-layout>>
[[filebeat-modules-quickstart]]
=== Quick Start for Common Log Formats
beta[]
Filebeat provides a set of pre-built modules that you can use to rapidly
implement and deploy a log monitoring solution, complete with sample dashboards
and data visualizations, in about 5 minutes. These modules support common log
formats, such as Nginx, Apache2, and MySQL, and can be run by issuing a simple
command.
This topic shows you how to run the basic modules out of the box without extra
configuration. For detailed documentation and the full list of available
modules, see <<filebeat-modules>>.
Skip this topic and go to <<filebeat-installation>> if you are using a log file
type that isn't supported by one of the available Filebeat modules.
==== Prerequisites
Before running Filebeat with modules enabled, you need to:
* Install and configure the Elastic stack. See
{libbeat}/getting-started.html[Getting Started with Beats and the Elastic Stack].
* Complete the Filebeat installation instructions described in
<<filebeat-installation>>. After installing Filebeat, return to this
quick start page.
* Install the Ingest Node GeoIP and User Agent plugins, which you can do by
running the following commands in the Elasticsearch home path:
+
[source,shell]
----------------------------------------------------------------------
sudo bin/elasticsearch-plugin install ingest-geoip
sudo bin/elasticsearch-plugin install ingest-user-agent
----------------------------------------------------------------------
+
You need to restart Elasticsearch after running these commands.
* Verify that Elasticsearch and Kibana are running and that Elasticsearch is
ready to receive data from Filebeat.
//TODO: Follow up to find out whether ingest-geoip and ingest-user-agent will be bundled with ES. If so, remove the last prepreq.
[[running-modules-quickstart]]
==== Running Filebeat with Modules Enabled
To run one or more Filebeat modules, you issue the following command:
[source,shell]
----------------------------------------------------------------------
filebeat -e -modules=MODULES -setup
----------------------------------------------------------------------
Where `MODULES` is the name of the module (or a comma-separated list of
modules) that you want to enable. The `-e` flag is optional and sends output
to standard error instead of syslog. The `-setup` flag is a one-time setup step.
For subsequent runs of Filebeat, do not specify this flag.
For example, to start Filebeat with the `system` module enabled and load the
sample Kibana dashboards, run:
[source,shell]
----------------------------------------------------------------------
filebeat -e -modules=system -setup
----------------------------------------------------------------------
This command takes care of configuring Filebeat, loading the recommended index
template for writing to Elasticsearch, and deploying the sample dashboards
for visualizing the data in Kibana.
To start Filebeat with the `system`, `nginx`, and `mysql` modules enabled
and load the sample dashboards, run:
[source,shell]
----------------------------------------------------------------------
filebeat -e -modules=system,nginx,mysql -setup
----------------------------------------------------------------------
To start Filebeat with the `system` module enabled (it's assumed that
you've already loaded the sample dashboards), run:
[source,shell]
----------------------------------------------------------------------
filebeat -e -modules=system
----------------------------------------------------------------------
TIP: In a production environment, you'll probably want to use a configuration
file, rather than command-line flags, to specify which modules to run. See the
detailed documentation for more about configuring and running modules.
These examples assume that the logs you're harvesting are in the location
expected for your OS and that the default behavior of Filebeat is appropriate
for your environment. Each module provides a set of variables that you can set
to fine tune the behavior of Filebeat, including the location where it looks
for log files. See <<filebeat-modules>> for more info.
[[visualizing-data]]
==== Visualizing the Data in Kibana
After you've confirmed that Filebeat is sending events to Elasticsearch, launch
the Kibana web interface by pointing your browser to port 5601. For example,
http://127.0.0.1:5601[http://127.0.0.1:5601].
Open the dashboard and explore the visualizations for your parsed logs.
Here's an example of the syslog dashboard:
image:./images/kibana-system.png[Sylog dashboard]
[[filebeat-installation]]
=== Step 1: Installing Filebeat
Before running Filebeat, you need to install and configure the Elastic stack. See
Before running Filebeat, you need to install and configure the Elastic stack. See
{libbeat}/getting-started.html[Getting Started with Beats and the Elastic Stack].
To download and install Filebeat, use the commands that work with your system
@ -153,33 +42,71 @@ See our https://www.elastic.co/downloads/beats/filebeat[download page] for other
[[deb]]
*deb:*
ifeval::["{release-state}"=="unreleased"]
Version {version} of {beatname_uc} has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","sh",subs="attributes,callouts"]
------------------------------------------------
curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-{version}-amd64.deb
sudo dpkg -i filebeat-{version}-amd64.deb
------------------------------------------------
endif::[]
[[rpm]]
*rpm:*
ifeval::["{release-state}"=="unreleased"]
Version {version} of {beatname_uc} has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","sh",subs="attributes,callouts"]
------------------------------------------------
curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-{version}-x86_64.rpm
sudo rpm -vi filebeat-{version}-x86_64.rpm
------------------------------------------------
endif::[]
[[mac]]
*mac:*
ifeval::["{release-state}"=="unreleased"]
Version {version} of {beatname_uc} has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","sh",subs="attributes,callouts"]
------------------------------------------------
curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-{version}-darwin-x86_64.tar.gz
tar xzvf filebeat-{version}-darwin-x86_64.tar.gz
------------------------------------------------
endif::[]
[[win]]
*win:*
ifeval::["{release-state}"=="unreleased"]
Version {version} of {beatname_uc} has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
. Download the Filebeat Windows zip file from the
https://www.elastic.co/downloads/beats/filebeat[downloads page].
@ -199,26 +126,27 @@ PS C:\Program Files\Filebeat> .\install-service-filebeat.ps1
NOTE: If script execution is disabled on your system, you need to set the execution policy for the current session to allow the script to run. For example: `PowerShell.exe -ExecutionPolicy UnRestricted -File .\install-service-filebeat.ps1`.
If you're using modules to get started with Filebeat, go back to the
<<filebeat-modules-quickstart>> page.
Otherwise, continue on to <<filebeat-configuration>>.
Before starting Filebeat, you should look at the configuration options in the configuration
file, for example `C:\Program Files\Filebeat\filebeat.yml` or `/etc/filebeat/filebeat.yml`. For more information about these options,
see <<filebeat-configuration-details>>.
endif::[]
[[filebeat-configuration]]
=== Step 2: Configuring Filebeat
TIP: <<filebeat-modules-overview,Filebeat modules>> provide the fastest getting
started experience for common log formats. See <<filebeat-modules-quickstart>> to
learn how to get started with modules.
started experience for common log formats. See <<filebeat-modules-quickstart>>
to learn how to get started with modules. If you use Filebeat modules to get
started, you can skip the content in this section, including the remaining
getting started steps, and go directly to the <<filebeat-modules-quickstart>>
page.
To configure Filebeat, you edit the configuration file. For rpm and deb, you'll
find the configuration file at `/etc/filebeat/filebeat.yml`. For mac and win, look in
the archive that you just extracted. Theres also a full example configuration file
called `filebeat.full.yml` that shows all non-deprecated options.
To configure Filebeat manually, you edit the configuration file. For rpm and deb,
you'll find the configuration file at `/etc/filebeat/filebeat.yml`. For mac and
win, look in the archive that you just extracted. There's also a full example
configuration file called `filebeat.full.yml` that shows all non-deprecated
options.
See the
{libbeat}/config-file-format.html[Config File Format] section of the
_Beats Platform Reference_ for more about the structure of the config file.
Here is a sample of the `filebeat` section of the `filebeat.yml` file. Filebeat uses predefined
default values for most configuration options.
@ -271,7 +199,10 @@ options specified: +./filebeat -configtest -e+. Make sure your config files are
in the path expected by Filebeat (see <<directory-layout>>). If you
installed from DEB or RPM packages, run +./filebeat.sh -configtest -e+.
See <<filebeat-configuration-details>> for more details about each configuration option.
Before starting Filebeat, you should look at the configuration options in the
configuration file, for example `C:\Program Files\Filebeat\filebeat.yml` or
`/etc/filebeat/filebeat.yml`. For more information about these options,
see <<filebeat-configuration-details>>.
[[config-filebeat-logstash]]
=== Step 3: Configuring Filebeat to Use Logstash
@ -312,8 +243,13 @@ sudo /etc/init.d/filebeat start
[source,shell]
----------------------------------------------------------------------
sudo chown root filebeat.yml <1>
sudo ./filebeat -e -c filebeat.yml -d "publish"
----------------------------------------------------------------------
<1> You'll be running Filebeat as root, so you need to change ownership
of the configuration file (see
{libbeat}/config-file-permissions.html[Config File Ownership and Permissions]
in the _Beats Platform Reference_).
*win:*
@ -347,3 +283,4 @@ Filebeat data.
image:./images/filebeat-discover-tab.png[]
TIP: If you don't see `filebeat-*` in the list of available index patterns, try refreshing the page in your browser.

View File

@ -7,6 +7,7 @@ include::../../libbeat/docs/version.asciidoc[]
:metricbeat: http://www.elastic.co/guide/en/beats/metricbeat/{doc-branch}
:filebeat: http://www.elastic.co/guide/en/beats/filebeat/{doc-branch}
:winlogbeat: http://www.elastic.co/guide/en/beats/winlogbeat/{doc-branch}
:logstashdoc: https://www.elastic.co/guide/en/logstash/{doc-branch}
:elasticsearch: https://www.elastic.co/guide/en/elasticsearch/reference/{doc-branch}
:elasticsearch-plugins: https://www.elastic.co/guide/en/elasticsearch/plugins/{doc-branch}
:securitydoc: https://www.elastic.co/guide/en/x-pack/5.2
@ -19,6 +20,8 @@ include::./overview.asciidoc[]
include::./getting-started.asciidoc[]
include::./modules-getting-started.asciidoc[]
include::./command-line.asciidoc[]
include::../../libbeat/docs/shared-directory-layout.asciidoc[]
@ -43,6 +46,7 @@ include::./multiple-prospectors.asciidoc[]
include::./load-balancing.asciidoc[]
:standalone:
:allplatforms:
include::../../libbeat/docs/yaml.asciidoc[]

View File

@ -0,0 +1,119 @@
[[filebeat-modules-quickstart]]
=== Quick Start for Common Log Formats
beta[]
Filebeat provides a set of pre-built modules that you can use to rapidly
implement and deploy a log monitoring solution, complete with sample dashboards
and data visualizations, in about 5 minutes. These modules support common log
formats, such as Nginx, Apache2, and MySQL, and can be run by issuing a simple
command.
This topic shows you how to run the basic modules out of the box without extra
configuration. For detailed documentation and the full list of available
modules, see <<filebeat-modules>>.
If you are using a log file type that isn't supported by one of the available
Filebeat modules, you'll need to set up and configure Filebeat manually by
following the numbered steps under <<filebeat-getting-started>>.
==== Prerequisites
Before running Filebeat with modules enabled, you need to:
* Install and configure the Elastic stack. See
{libbeat}/getting-started.html[Getting Started with Beats and the Elastic Stack].
* Complete the Filebeat installation instructions described in
<<filebeat-installation>>. After installing Filebeat, return to this
quick start page.
* Install the Ingest Node GeoIP and User Agent plugins. These plugins are
required to capture the geographical location and browser information used by
some of the visualizations available in the sample dashboards. You can install
these plugins by running the following commands in the Elasticsearch home path:
+
[source,shell]
----------------------------------------------------------------------
sudo bin/elasticsearch-plugin install ingest-geoip
sudo bin/elasticsearch-plugin install ingest-user-agent
----------------------------------------------------------------------
+
You need to restart Elasticsearch after running these commands.
* Verify that Elasticsearch and Kibana are running and that Elasticsearch is
ready to receive data from Filebeat.
[[running-modules-quickstart]]
==== Running Filebeat with Modules Enabled
To run one or more Filebeat modules, you issue the following command:
[source,shell]
----------------------------------------------------------------------
./filebeat -e -modules=MODULES -setup
----------------------------------------------------------------------
Where `MODULES` is the name of the module (or a comma-separated list of
modules) that you want to enable. The `-e` flag is optional and sends output
to standard error instead of syslog. The `-setup` flag is a one-time setup step.
For subsequent runs of Filebeat, do not specify this flag.
The following example starts Filebeat with the `system` module enabled and
loads the sample Kibana dashboards:
[source,shell]
----------------------------------------------------------------------
./filebeat -e -modules=system -setup
----------------------------------------------------------------------
This command takes care of configuring Filebeat, loading the recommended index
template for writing to Elasticsearch, and deploying the sample dashboards
for visualizing the data in Kibana.
NOTE: Depending on how you've installed Filebeat, you might see errors
related to file ownership or permissions when you try to run Filebeat modules.
See {libbeat}/config-file-permissions.html[Config File Ownership and Permissions]
in the _Beats Platform Reference_ if you encounter errors related to file
ownership or permissions.
include::system-module-note.asciidoc[]
To start Filebeat with the `system`, `nginx`, and `mysql` modules enabled
and load the sample dashboards, run:
[source,shell]
----------------------------------------------------------------------
./filebeat -e -modules=system,nginx,mysql -setup
----------------------------------------------------------------------
To start Filebeat with the `system` module enabled (it's assumed that
you've already loaded the sample dashboards), run:
[source,shell]
----------------------------------------------------------------------
./filebeat -e -modules=system
----------------------------------------------------------------------
TIP: In a production environment, you'll probably want to use a configuration
file, rather than command-line flags, to specify which modules to run. See the
detailed documentation for more about configuring and running modules.
These examples assume that the logs you're harvesting are in the location
expected for your OS and that the default behavior of Filebeat is appropriate
for your environment. Each module provides a set of variables that you can set
to fine tune the behavior of Filebeat, including the location where it looks
for log files. See <<filebeat-modules>> for more info.
[[visualizing-data]]
==== Visualizing the Data in Kibana
After you've confirmed that Filebeat is sending events to Elasticsearch, launch
the Kibana web interface by pointing your browser to port 5601. For example,
http://127.0.0.1:5601[http://127.0.0.1:5601].
Open the dashboard and explore the visualizations for your parsed logs.
Here's an example of the syslog dashboard:
image:./images/kibana-system.png[Syslog dashboard]

View File

@ -37,13 +37,14 @@ Node.
This tutorial assumes you have Elasticsearch and Kibana installed and
accessible from Filebeat (see the <<filebeat-getting-started,getting started>>
section). It also assumes that the Ingest Node GeoIP and User Agent plugins are
installed, which you can do with the following two commands executed in the
Elasticsearch home path:
installed. These plugins are required to capture the geographical location and
browser information used by some of the visualizations available in the sample
dashboards. You can install these plugins by running the following commands in the Elasticsearch home path:
[source,shell]
----------------------------------------------------------------------
$ sudo bin/elasticsearch-plugin install ingest-geoip
$ sudo bin/elasticsearch-plugin install ingest-user-agent
sudo bin/elasticsearch-plugin install ingest-geoip
sudo bin/elasticsearch-plugin install ingest-user-agent
----------------------------------------------------------------------
You need to restart Elasticsearch after running these commands.
@ -59,7 +60,7 @@ You can start Filebeat with the following command:
[source,shell]
----------------------------------------------------------------------
$ filebeat -e -modules=nginx -setup
./filebeat -e -modules=nginx -setup
----------------------------------------------------------------------
The `-e` flag tells Filebeat to output its logs to standard error, instead of
@ -82,9 +83,11 @@ You can also start multiple modules at once:
[source,shell]
----------------------------------------------------------------------
$ filebeat -e -modules=nginx,mysql,system
./filebeat -e -modules=nginx,mysql,system
----------------------------------------------------------------------
include::system-module-note.asciidoc[]
While enabling the modules from the CLI file is handy for getting started and
for testing, you will probably want to use the configuration file for the
production setup. The equivalent of the above in the configuration file is:
@ -92,10 +95,10 @@ production setup. The equivalent of the above in the configuration file is:
[source,yaml]
----------------------------------------------------------------------
modules:
- name: nginx
- name: mysql
- name: syslog
filebeat.modules:
- module: nginx
- module: mysql
- module: system
----------------------------------------------------------------------
Then you can start Filebeat simply with: `./filebeat -e`.
@ -116,17 +119,17 @@ files are in a custom location:
[source,shell]
----------------------------------------------------------------------
$ filebeat -e -modules=nginx -M "nginx.access.var.paths=[/opt/apache2/logs/access.log*]"
./filebeat -e -modules=nginx -M "nginx.access.var.paths=[/var/log/nginx/access.log*]"
----------------------------------------------------------------------
Or via the configuration file:
[source,yaml]
----------------------------------------------------------------------
modules:
- name: nginx
filebeat.modules:
- module: nginx
access:
var.paths = ["/opt/apache2/logs/access.log*"]
var.paths = ["/var/log/nginx/access.log*"]
----------------------------------------------------------------------
The Nginx `access` fileset also has a `pipeline` variable, which allows
@ -138,7 +141,7 @@ cannot install the plugins, you can use the following:
[source,shell]
----------------------------------------------------------------------
$ filebeat -e -modules=nginx -M "nginx.access.var.pipeline=no_plugins"
./filebeat -e -modules=nginx -M "nginx.access.var.pipeline=no_plugins"
----------------------------------------------------------------------
==== Advanced settings
@ -150,8 +153,8 @@ example, enabling <<close-eof,close_eof>> can be done like this:
[source,yaml]
----------------------------------------------------------------------
modules:
- name: nginx
filebeat.modules:
- module: nginx
access:
prospector:
close_eof: true
@ -162,7 +165,7 @@ Or like this:
[source,shell]
----------------------------------------------------------------------
$ filebeat -e -modules=nginx -M "nginx.access.prospector.close_eof=true"
./filebeat -e -modules=nginx -M "nginx.access.prospector.close_eof=true"
----------------------------------------------------------------------
From the CLI, it's possible to change variables or settings for multiple
@ -171,7 +174,7 @@ modules/fileset at once. For example, the following works and will enable
[source,shell]
----------------------------------------------------------------------
$ filebeat -e -modules=nginx -M "nginx.*.prospector.close_eof=true"
./filebeat -e -modules=nginx -M "nginx.*.prospector.close_eof=true"
----------------------------------------------------------------------
The following also works and will enable `close_eof` for all prospectors
@ -179,6 +182,5 @@ created by any of the modules:
[source,shell]
----------------------------------------------------------------------
filebeat -e -modules=nginx,mysql -M "*.*.prospector.close_eof=true"
./filebeat -e -modules=nginx,mysql -M "*.*.prospector.close_eof=true"
----------------------------------------------------------------------

View File

@ -5,7 +5,10 @@
Before modifying configuration settings, make sure you've completed the
<<filebeat-configuration,configuration steps>> in the Getting Started.
The {beatname_uc} configuration file, +{beatname_lc}.yml+, uses http://yaml.org/[YAML] for its syntax.
The {beatname_uc} configuration file, +{beatname_lc}.yml+, uses http://yaml.org/[YAML] for its syntax. See the
{libbeat}/config-file-format.html[Config File Format] section of the
_Beats Platform Reference_ for more about the structure of the config file.
The configuration options are described in the following sections. After changing
configuration settings, you need to restart {beatname_uc} to pick up the changes.
@ -20,6 +23,7 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes
* <<file-output>>
* <<console-output>>
* <<configuration-output-ssl>>
* <<configuration-output-codec>>
* <<configuration-path>>
* <<configuration-dashboards>>
* <<configuration-logging>>

View File

@ -1,5 +1,5 @@
[[configuration-filebeat-options]]
=== Filebeat Prospectors Configuration
=== Filebeat Prospectors
The `filebeat` section of the +{beatname_lc}.yml+ config file specifies a list of `prospectors` that Filebeat
uses to locate and process log files. Each prospector item begins with a dash (-)
@ -294,6 +294,7 @@ If you require log lines to be sent in near real time do not use a very low `sca
The default setting is 10s.
[[filebeat-document-type]]
===== document_type
The event type to use for published lines read by harvesters. For Elasticsearch
@ -474,7 +475,7 @@ by assigning a higher limit of harvesters.
The `enabled` option can be used with each prospector to define if a prospector is enabled or not. By default, enabled is set to true.
[[configuration-global-options]]
=== Filebeat Global Configuration
=== Filebeat Global
You can specify configuration options in the +{beatname_lc}.yml+ config file to control Filebeat behavior at a global level.

View File

@ -3,11 +3,17 @@
beta[]
Reload configuration allows to dynamically reload prospector configuration files. A glob can be defined which should be watched
for prospector configuration changes. New prospectors will be started / stopped accordingly. This is especially useful in
container environments where 1 container is used to tail logs from services in other containers on the same host.
You can configure Filebeat to dynamically reload prospector configuration files
when there are changes. To do this, you specify a path
(https://golang.org/pkg/path/filepath/#Glob[Glob]) to watch for prospector
configuration changes. When the files found by the Glob change, new prospectors
are started/stopped according to changes in the configuration files.
The configuration in the main filebeat.yml config file looks as following:
This feature is especially useful in container environments where one container
is used to tail logs for services running in other containers on the same host.
To enable dynamic config reloading, you specify the `path` and `reload` options
in the main `filebeat.yml` config file. For example:
[source,yaml]
------------------------------------------------------------------------------
@ -17,11 +23,16 @@ filebeat.config.prospectors:
reload.period: 10s
------------------------------------------------------------------------------
A path with a glob must be defined on which files should be checked for changes. A period is set on how often
the files are checked for changes. Do not set period below 1s as the modification time of files is often stored in seconds.
Setting it below 1s will cause an unnecessary overhead.
`path`:: A Glob that defines the files to check for changes.
`reload.enabled`:: When set to `true`, enables dynamic config reload.
`reload.period`:: Specifies how often the files are checked for changes. Do not
set the `period` to less than 1s because the modification time of files is often
stored in seconds. Setting the `period` to less than 1s will result in
unnecessary overhead.
Each file found by the Glob must contain a list of one or more prospector
definitions. For example:
The configuration inside the files which are found by the glob look as following:
[source,yaml]
------------------------------------------------------------------------------
- input_type: log
@ -35,7 +46,6 @@ The configuration inside the files which are found by the glob look as following
scan_frequency: 5s
------------------------------------------------------------------------------
Each file directly contains a list of prospectors. Each file can contain one or multiple prospector definitions.
WARNING: It is critical that two running prospectors DO NOT have overlapping file paths defined. If more then one prospector
harvests the same file at the same time, it can lead to unexpected behaviour.
WARNING: It is critical that two running prospectors DO NOT have overlapping
file paths defined. If more than one prospector harvests the same file at the
same time, it can lead to unexpected behaviour.

View File

@ -0,0 +1,19 @@
[NOTE]
===============================================================================
Because Filebeat modules are currently in Beta, the default Filebeat
configuration may interfere with the Filebeat `system` module configuration. If
you plan to run the `system` module, edit the Filebeat configuration file,
`filebeat.yml`, and comment out the following lines:
[source,yaml]
----------------------------------------------------------------------
#- input_type: log
#paths:
#- /var/log/*.log
----------------------------------------------------------------------
For rpm and deb, you'll find the configuration file at
`/etc/filebeat/filebeat.yml`. For mac and win, look in the archive that you
extracted when you installed Filebeat.
===============================================================================

View File

@ -92,6 +92,35 @@
- name: meta.cloud.region
description: >
Region in which this host is running.
- key: kubernetes
title: Kubernetes info
description: >
Kubernetes metadata added by the kubernetes processor
fields:
- name: kubernetes.pod.name
type: keyword
description: >
Kubernetes pod name
- name: kubernetes.namespace
type: keyword
description: >
Kubernetes namespace
- name: kubernetes.labels
type: object
description: >
Kubernetes labels map
- name: kubernetes.annotations
type: object
description: >
Kubernetes annotations map
- name: kubernetes.container.name
type: keyword
description: >
Kubernetes container name
- key: log
title: Log File Content
description: >
@ -149,6 +178,7 @@
title: "Apache2"
description: >
Apache2 Module
short_config: true
fields:
- name: apache2
type: group
@ -297,6 +327,7 @@
title: "Auditd"
description: >
Module for parsing auditd logs.
short_config: true
fields:
- name: auditd
type: group
@ -453,6 +484,7 @@
title: "MySQL"
description: >
Module for parsing the MySQL log files.
short_config: true
fields:
- name: mysql
type: group
@ -528,6 +560,7 @@
title: "Nginx"
description: >
Module for parsing the Nginx log files.
short_config: true
fields:
- name: nginx
type: group
@ -672,6 +705,7 @@
title: "System"
description: >
Module for parsing system log files.
short_config: true
fields:
- name: system
type: group

View File

@ -7,7 +7,7 @@
}
},
"_meta": {
"version": "5.3.0"
"version": "5.3.2"
},
"date_detection": false,
"dynamic_templates": [

View File

@ -5,7 +5,7 @@
"norms": false
},
"_meta": {
"version": "5.3.0"
"version": "5.3.2"
},
"date_detection": false,
"dynamic_templates": [

View File

@ -30,7 +30,7 @@ func (r *JSON) decodeJSON(text []byte) ([]byte, common.MapStr) {
var jsonFields map[string]interface{}
err := unmarshal(text, &jsonFields)
if err != nil {
if err != nil || jsonFields == nil {
logp.Err("Error decoding JSON: %v", err)
if r.cfg.AddErrorKey {
jsonFields = common.MapStr{JsonErrorKey: fmt.Sprintf("Error decoding JSON: %v", err)}
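The extra `jsonFields == nil` guard is needed because `encoding/json` reports no error for the literal input `null`; it simply leaves the destination map as `nil`. A minimal standalone sketch of that behaviour (illustrative only, not code from the Filebeat source):
[source,go]
----------------------------------------------------------------------
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var fields map[string]interface{}

	// Unmarshalling the literal "null" succeeds but leaves the map nil,
	// so checking err alone does not catch this degenerate input.
	err := json.Unmarshal([]byte(`null`), &fields)
	fmt.Println(err, fields == nil) // <nil> true

	// A regular JSON object populates the map as expected.
	err = json.Unmarshal([]byte(`{"message":"test"}`), &fields)
	fmt.Println(err, fields["message"]) // <nil> test
}
----------------------------------------------------------------------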

View File

@ -116,6 +116,13 @@ func TestDecodeJSON(t *testing.T) {
ExpectedText: `{"message": "test", "value": "`,
ExpectedMap: nil,
},
{
// in case the JSON is "null", we should just not panic
Text: `null`,
Config: JSONConfig{MessageKey: "value", AddErrorKey: true},
ExpectedText: `null`,
ExpectedMap: common.MapStr{"json_error": "Error decoding JSON: <nil>"},
},
{
// Add key error helps debugging this
Text: `{"message": "test", "value": "`,

View File

@ -1,10 +1,10 @@
{
"description": "Pipeline for parsing Nginx access logs. Requires the geoip and user_agent plugins.",
"description": "Pipeline for parsing Apache2 access logs. Requires the geoip and user_agent plugins.",
"processors": [{
"grok": {
"field": "message",
"patterns":[
"%{IPORHOST:apache2.access.remote_ip} - %{DATA:apache2.access.user_name} \\[%{HTTPDATE:apache2.access.time}\\] \"%{WORD:apache2.access.method} %{DATA:apache2.access.url} HTTP/%{NUMBER:apache2.access.http_version}\" %{NUMBER:apache2.access.response_code} %{NUMBER:apache2.access.body_sent.bytes}( \"%{DATA:apache2.access.referrer}\")?( \"%{DATA:apache2.access.agent}\")?",
"%{IPORHOST:apache2.access.remote_ip} - %{DATA:apache2.access.user_name} \\[%{HTTPDATE:apache2.access.time}\\] \"%{WORD:apache2.access.method} %{DATA:apache2.access.url} HTTP/%{NUMBER:apache2.access.http_version}\" %{NUMBER:apache2.access.response_code} (?:%{NUMBER:apache2.access.body_sent.bytes}|-)( \"%{DATA:apache2.access.referrer}\")?( \"%{DATA:apache2.access.agent}\")?",
"%{IPORHOST:apache2.access.remote_ip} - %{DATA:apache2.access.user_name} \\[%{HTTPDATE:apache2.access.time}\\] \"-\" %{NUMBER:apache2.access.response_code} -"
],
"ignore_missing": true

View File

@ -134,7 +134,7 @@ func (p *Prospector) Start() {
logp.Info("Prospector channel stopped")
return
case <-p.beatDone:
logp.Info("Prospector channel stopped")
logp.Info("Prospector channel stopped because beat is stopping.")
return
case event := <-p.harvesterChan:
// No stopping on error, because on error it is expected that beatDone is closed

View File

@ -30,6 +30,10 @@ func NewProspectorLog(p *Prospector) (*ProspectorLog, error) {
config: p.config,
}
if len(p.config.Paths) == 0 {
return nil, fmt.Errorf("each prospector must have at least one path defined")
}
return prospectorer, nil
}

View File

@ -28,6 +28,7 @@ func TestProspectorFileExclude(t *testing.T) {
prospector := Prospector{
config: prospectorConfig{
Paths: []string{"test.log"},
ExcludeFiles: []match.Matcher{match.MustCompile(`\.gz$`)},
},
}

View File

@ -70,6 +70,9 @@ filebeat.prospectors:
max_lines: {{ max_lines|default(500) }}
{% endif %}
{% endif %}
{% if prospector_raw %}
{{prospector_raw}}
{% endif %}
filebeat.spool_size:
filebeat.shutdown_timeout: {{ shutdown_timeout|default(0) }}

View File

@ -146,3 +146,27 @@ class Test(BaseTest):
self.copy_files(["logs/nasa-50k.log"],
source_dir="../files",
target_dir="log")
def test_stopping_empty_path(self):
"""
Test that filebeat stops properly when one prospector has an invalid config.
"""
prospector_raw = """
- input_type: log
paths: []
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
prospector_raw=prospector_raw,
)
filebeat = self.start_beat()
time.sleep(2)
# Wait until first flush
self.wait_until(
lambda: self.log_contains_count("No paths were defined for prospector") >= 1,
max_timeout=5)
filebeat.check_wait(exit_code=1)

View File

@ -14,6 +14,10 @@ configuration file at +/etc/heartbeat/heartbeat.full.yml+ that shows
all non-deprecated options. For mac and win, look in the archive that you
extracted.
See the
{libbeat}/config-file-format.html[Config File Format] section of the
_Beats Platform Reference_ for more about the structure of the config file.
The following topics describe how to configure Heartbeat:
* <<heartbeat-configuration-details>>

View File

@ -7,7 +7,7 @@ related products:
* Elasticsearch for storage and indexing the data.
* Kibana for the UI.
* Logstash (optional) for inserting data into Elasticsearch.
See {libbeat}/getting-started.html[Getting Started with Beats and the Elastic Stack]
for more information.
@ -28,9 +28,9 @@ install, configure, and run Heartbeat:
Unlike most Beats, which you install on edge nodes, you typically install
Heartbeat as part of a monitoring service that runs on a separate machine
and possibly even outside of the network where the services that you want to
monitor are running.
monitor are running.
//TODO: Add a separate topic that explores deployment scenarios in more detail (like installing on a sub-network where there's a firewall etc.
//TODO: Add a separate topic that explores deployment scenarios in more detail (like installing on a sub-network where there's a firewall etc.
To download and install Heartbeat, use the commands that work with your
system (<<deb, deb>> for Debian/Ubuntu, <<rpm, rpm>> for Redhat/Centos/Fedora,
@ -47,33 +47,71 @@ See our https://www.elastic.co/downloads/beats/heartbeat[download page] for othe
[[deb]]
*deb:*
ifeval::["{release-state}"=="unreleased"]
Version {version} of {beatname_uc} has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","sh",subs="attributes"]
----------------------------------------------------------------------
curl -L -O {downloads}/heartbeat/heartbeat-{version}-amd64.deb
sudo dpkg -i heartbeat-{version}-amd64.deb
----------------------------------------------------------------------
endif::[]
[[rpm]]
*rpm:*
ifeval::["{release-state}"=="unreleased"]
Version {version} of {beatname_uc} has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","sh",subs="attributes"]
----------------------------------------------------------------------
curl -L -O {downloads}/heartbeat/heartbeat-{version}-x86_64.rpm
sudo rpm -vi heartbeat-{version}-x86_64.rpm
----------------------------------------------------------------------
endif::[]
[[mac]]
*mac:*
ifeval::["{release-state}"=="unreleased"]
Version {version} of {beatname_uc} has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","sh",subs="attributes"]
------------------------------------------------
curl -L -O {downloads}/heartbeat/heartbeat-{version}-darwin-x86_64.tar.gz
tar xzvf heartbeat-{version}-darwin-x86_64.tar.gz
------------------------------------------------
endif::[]
[[win]]
*win:*
ifeval::["{release-state}"=="unreleased"]
Version {version} of {beatname_uc} has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
. Download the Heartbeat Windows zip file from the
https://www.elastic.co/downloads/beats/heartbeat[downloads page].
@ -98,6 +136,8 @@ execution policy for the current session to allow the script to run. For
example:
+PowerShell.exe -ExecutionPolicy UnRestricted -File .\install-service-heartbeat.ps1+.
endif::[]
Before starting Heartbeat, you should look at the configuration options in
the configuration file, for example +C:\Program Files\Heartbeat\heartbeat.yml+
or +/etc/heartbeat/heartbeat.yml+. For more information about these
@ -112,6 +152,10 @@ For mac and win, look in the archive that you just extracted. There's also a
full example configuration file called `heartbeat.full.yml` that shows all
non-deprecated options.
See the
{libbeat}/config-file-format.html[Config File Format] section of the
_Beats Platform Reference_ for more about the structure of the config file.
Heartbeat provides monitors to check the status of hosts at set intervals.
You configure each monitor individually. Heartbeat currently provides monitors
for ICMP, TCP, and HTTP (see <<heartbeat-overview>> for more about these
@ -121,8 +165,8 @@ monitor:
[source,yaml]
----------------------------------------------------------------------
heartbeat.monitors:
- type: icmp
schedule: '*/5 * * * * * *'
- type: icmp
schedule: '*/5 * * * * * *'
hosts: ["myhost"]
output.elasticsearch:
hosts: ["myhost:9200"]
@ -140,7 +184,7 @@ heartbeat.monitors:
- type: icmp
schedule: '*/5 * * * * * *' <1>
hosts: ["myhost"]
- type: tcp
- type: tcp
schedule: '@every 5s' <2>
hosts: ["myhost:12345"]
mode: any <3>
@ -195,7 +239,7 @@ start Heartbeat in the foreground.
["source","sh",subs="attributes"]
----------------------------------------------------------------------
sudo /etc/init.d/ start
sudo /etc/init.d/heartbeat start
----------------------------------------------------------------------
*rpm:*
@ -209,8 +253,13 @@ sudo /etc/init.d/heartbeat start
["source","sh",subs="attributes"]
----------------------------------------------------------------------
sudo chown root heartbeat.yml <1>
sudo ./heartbeat -e -c heartbeat.yml -d "publish"
----------------------------------------------------------------------
<1> You'll be running Heartbeat as root, so you need to change ownership
of the configuration file (see
{libbeat}/config-file-permissions.html[Config File Ownership and Permissions]
in the _Beats Platform Reference_).
*win:*
@ -224,8 +273,17 @@ By default, Windows log files are stored in +C:\ProgramData\heartbeat\Logs+.
Heartbeat is now ready to check the status of your services and send
events to your defined output.
//TODO: Add content about sample dashboards when the dashboards are available.
[[heartbeat-sample-dashboards]]
=== Step 5: Loading Sample Kibana Dashboards
//:allplatforms:
To make it easier for you to visualize the status of your services, we have
created sample Heartbeat dashboards. The dashboards are provided as
examples. We recommend that you
http://www.elastic.co/guide/en/kibana/current/dashboard.html[customize] them
to meet your needs.
image:./images/heartbeat-statistics.png[Heartbeat statistics]
:allplatforms:
include::../../libbeat/docs/dashboards.asciidoc[]
//include::../../libbeat/docs/dashboards.asciidoc[]

Binary file not shown. (after: 222 KiB)

Binary file not shown. (before: 176 KiB, after: 64 KiB)

Binary file not shown. (before: 101 KiB, after: 38 KiB)

View File

@ -7,6 +7,7 @@ include::../../libbeat/docs/version.asciidoc[]
:metricbeat: http://www.elastic.co/guide/en/beats/metricbeat/{doc-branch}
:filebeat: http://www.elastic.co/guide/en/beats/filebeat/{doc-branch}
:winlogbeat: http://www.elastic.co/guide/en/beats/winlogbeat/{doc-branch}
:logstashdoc: https://www.elastic.co/guide/en/logstash/{doc-branch}
:elasticsearch: https://www.elastic.co/guide/en/elasticsearch/reference/{doc-branch}
:securitydoc: https://www.elastic.co/guide/en/x-pack/5.2
:downloads: https://artifacts.elastic.co/downloads/beats
@ -42,6 +43,7 @@ include::./configuring-logstash.asciidoc[]
include::../../libbeat/docs/shared-env-vars.asciidoc[]
:standalone:
:allplatforms:
include::../../libbeat/docs/yaml.asciidoc[]

View File

@ -5,7 +5,10 @@
Before modifying configuration settings, make sure you've completed the
<<heartbeat-configuration,configuration steps>> in the Getting Started.
The Heartbeat configuration file, +heartbeat.yml+, uses http://yaml.org/[YAML] for its syntax.
The Heartbeat configuration file, +heartbeat.yml+, uses http://yaml.org/[YAML] for its syntax. See the
{libbeat}/config-file-format.html[Config File Format] section of the
_Beats Platform Reference_ for more about the structure of the config file.
The configuration options are described in the following sections. After changing
configuration settings, you need to restart Heartbeat to pick up the changes.
@ -18,6 +21,7 @@ configuration settings, you need to restart Heartbeat to pick up the changes.
* <<file-output>>
* <<console-output>>
* <<configuration-output-ssl>>
* <<configuration-output-codec>>
* <<configuration-path>>
* <<configuration-dashboards>>
* <<configuration-logging>>

View File

@ -1,5 +1,5 @@
[[configuration-heartbeat-options]]
=== Heartbeat Configuration
=== Heartbeat
The `heartbeat` section of the +heartbeat.yml+ config file specifies the
list of `monitors` that Heartbeat uses to check your remote hosts to

View File

@ -1,240 +0,0 @@
- key: beat
title: Beat
description: >
Contains common beat fields available in all event types.
fields:
- name: beat.name
description: >
The name of the Beat sending the log messages. If the Beat name is
set in the configuration file, then that value is used. If it is not
set, the hostname is used. To set the Beat name, use the `name`
option in the configuration file.
- name: beat.hostname
description: >
The hostname as returned by the operating system on which the Beat is
running.
- name: beat.timezone
description: >
The timezone as returned by the operating system on which the Beat is
running.
- name: beat.version
description: >
The version of the beat that generated this event.
- name: "@timestamp"
type: date
required: true
format: date
example: August 26th 2016, 12:35:53.332
description: >
The timestamp when the event log record was generated.
- name: tags
description: >
Arbitrary tags that can be set per Beat and per transaction
type.
- name: fields
type: object
object_type: keyword
description: >
Contains user configurable fields.
- name: error
type: group
description: >
Error fields containing additional info in case of errors.
fields:
- name: message
type: text
description: >
Error message.
- name: code
type: long
description: >
Error code.
- name: type
type: keyword
description: >
Error type.
- key: cloud
title: Cloud Provider Metadata
description: >
Metadata from cloud providers added by the add_cloud_metadata processor.
fields:
- name: meta.cloud.provider
example: ec2
description: >
Name of the cloud provider. Possible values are ec2, gce, or digitalocean.
- name: meta.cloud.instance_id
description: >
Instance ID of the host machine.
- name: meta.cloud.machine_type
example: t2.medium
description: >
Machine type of the host machine.
- name: meta.cloud.availability_zone
example: us-east-1c
description: >
Availability zone in which this host is running.
- name: meta.cloud.project_id
example: project-x
description: >
Name of the project in Google Cloud.
- name: meta.cloud.region
description: >
Region in which this host is running.
- key: common
title: "Common monitoring fields"
description:
fields:
- name: type
type: keyword
required: true
description: >
The monitor type.
- name: monitor
type: keyword
description: >
Monitor job name.
- name: scheme
type: keyword
description: >
Address url scheme. For example `tcp`, `ssl`, `http`, and `https`.
- name: host
type: keyword
description: >
Hostname of service being monitored. Can be missing, if service is
monitored by IP.
- name: port
type: integer
description: >
Service port number.
- name: url
type: text
description: >
Service url used by monitor.
- name: ip
type: keyword
description: >
IP of service being monitored. If service is monitored by hostname,
the `ip` field contains the resolved ip address for the current host.
- name: duration
type: group
description: total monitoring test duration
fields:
- name: us
type: long
description: Duration in microseconds
- name: resolve_rtt
type: group
description: Duration required to resolve an IP from hostname.
fields:
- name: us
type: long
description: Duration in microseconds
- name: icmp_rtt
type: group
description: ICMP Echo Request and Reply round trip time
fields:
- name: us
type: long
description: Duration in microseconds
- name: tcp_connect_rtt
type: group
description: >
Duration required to establish a TCP connection based on already
available IP address.
fields:
- name: us
type: long
description: Duration in microseconds
- name: socks5_connect_rtt
type: group
description: >
Time required to establish a connection via SOCKS5 to endpoint based on available
connection to SOCKS5 proxy.
fields:
- name: us
type: long
description: Duration in microseconds
- name: tls_handshake_rtt
type: group
description: >
Time required to finish TLS handshake based on already available network
connection.
fields:
- name: us
type: long
description: Duration in microseconds
- name: http_rtt
type: group
description: >
Time required between sending the HTTP request and the first byte of the HTTP
response being read. Duration based on already available network connection.
fields:
- name: us
type: long
description: Duration in microseconds
- name: validate_rtt
type: group
description: >
Time required for validating the connection if connection checks are configured.
fields:
- name: us
type: long
description: Duration in microseconds
- name: response
type: group
description: >
Service response parameters.
fields:
- name: status
type: integer
description: >
Response status code.
- name: up
required: true
type: boolean
description: >
Boolean indicator of whether the monitor could validate that the service is available.
- name: error
type: group
description: >
Reason why the monitor flagged the service as down.
fields:
- name: type
type: keyword
description: >
Failure type. For example `io` or `validate`.
- name: message
type: text
description: >
Failure description.

View File

@ -7,7 +7,7 @@
}
},
"_meta": {
"version": "5.3.0"
"version": "5.3.2"
},
"date_detection": false,
"dynamic_templates": [

View File

@ -5,7 +5,7 @@
"norms": false
},
"_meta": {
"version": "5.3.0"
"version": "5.3.2"
},
"date_detection": false,
"dynamic_templates": [

View File

@ -1,3 +1,3 @@
package beat
const defaultBeatVersion = "5.3.0"
const defaultBeatVersion = "5.3.2"

View File

@ -90,7 +90,7 @@ func isPrefixNumDate(r *syntax.Regexp) bool {
i++
}
// check digits
// check starts with digits `\d{n}` or `[0-9]{n}`
if !isMultiDigits(r.Sub[i]) {
return false
}
@ -103,6 +103,11 @@ func isPrefixNumDate(r *syntax.Regexp) bool {
}
i++
// regex has 'OpLiteral' suffix, without any more digits/patterns following
if i == len(r.Sub) {
return true
}
// check digits
if !isMultiDigits(r.Sub[i]) {
return false
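A minimal sketch, separate from the libbeat sources, of the AST shape this check walks: parsing an anchored date pattern with Go's regexp/syntax package yields a concatenation of the begin anchor, fixed-width digit repetitions, literal separators, and, in the case handled by the new branch, a trailing OpLiteral suffix.
["source","go"]
----------------------------------------------------------------------
package main

import (
	"fmt"
	"regexp/syntax"
)

func main() {
	// Parse the kind of pattern the prefix-num-date check targets: an
	// anchored date followed by a literal suffix.
	re, err := syntax.Parse(`^\d{4}-\d{2}-\d{2} - `, syntax.Perl)
	if err != nil {
		panic(err)
	}

	// The top-level expression is a concatenation; its sub-expressions are
	// the anchor, repeated digit classes, literal separators, and the
	// trailing literal suffix.
	fmt.Println("top-level op:", re.Op)
	for i, sub := range re.Sub {
		fmt.Printf("  sub[%d]: %-12v %s\n", i, sub.Op, sub)
	}
}
----------------------------------------------------------------------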

View File

@ -88,14 +88,21 @@ func compilePrefixNumDate(r *syntax.Regexp) (stringMatcher, error) {
i++
for i < len(r.Sub) {
seps = append(seps, []byte(string(r.Sub[i].Rune)))
lit := []byte(string(r.Sub[i].Rune))
i++
// capture literal suffix
if i == len(r.Sub) {
m.suffix = lit
break
}
seps = append(seps, lit)
digits = append(digits, digitLen(r.Sub[i]))
i++
}
minLen := len(m.prefix)
minLen := len(m.prefix) + len(m.suffix)
for _, d := range digits {
minLen += d
}

View File

@ -89,7 +89,9 @@ func BenchmarkPatterns(b *testing.B) {
{"startsWith ' '", `^ `},
{"startsWithDate", `^\d{2}-\d{2}-\d{4}`},
{"startsWithDate2", `^\d{4}-\d{2}-\d{2}`},
{"startsWithDate3", `^20\d{2}-\d{2}-\d{2}`},
{"startsWithDate3", `^\d\d\d\d-\d\d-\d\d`},
{"startsWithDate4", `^20\d{2}-\d{2}-\d{2}`},
{"startsWithDateAndSpace", `^\d{4}-\d{2}-\d{2} `},
{"startsWithLevel", `^(DEBUG|INFO|WARN|ERR|CRIT)`},
{"hasLevel", `(DEBUG|INFO|WARN|ERR|CRIT)`},
{"contains 'PATTERN'", `PATTERN`},

View File

@ -120,6 +120,18 @@ func TestMatchers(t *testing.T) {
"This should not match",
},
},
{
`^\d\d\d\d-\d\d-\d\d`,
typeOf((*prefixNumDate)(nil)),
[]string{
"2017-01-02 should match",
"2017-01-03 should also match",
},
[]string{
"- 2017-01-02 should not match",
"fail",
},
},
{
`^\d{4}-\d{2}-\d{2}`,
typeOf((*prefixNumDate)(nil)),
@ -132,6 +144,30 @@ func TestMatchers(t *testing.T) {
"fail",
},
},
{
`^(\d{2}){2}-\d{2}-\d{2}`,
typeOf((*prefixNumDate)(nil)),
[]string{
"2017-01-02 should match",
"2017-01-03 should also match",
},
[]string{
"- 2017-01-02 should not match",
"fail",
},
},
{
`^\d{4}-\d{2}-\d{2} - `,
typeOf((*prefixNumDate)(nil)),
[]string{
"2017-01-02 - should match",
"2017-01-03 - should also match",
},
[]string{
"- 2017-01-02 should not match",
"fail",
},
},
{
`^20\d{2}-\d{2}-\d{2}`,
typeOf((*prefixNumDate)(nil)),

View File

@ -36,9 +36,10 @@ type altPrefixMatcher struct {
type prefixNumDate struct {
minLen int
digits []int
prefix []byte
digits []int
seps [][]byte
suffix []byte
}
type emptyStringMatcher struct{}
@ -182,6 +183,12 @@ func (m *prefixNumDate) Match(in []byte) bool {
}
}
if sfx := m.suffix; len(sfx) > 0 {
if !bytes.HasPrefix(in[pos:], sfx) {
return false
}
}
return true
}
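A compressed sketch of the matching idea, with invented type and field names rather than the actual libbeat ones: consume the fixed prefix, then each fixed-width digit run and its literal separator, and finally require the captured literal suffix. For `^\d{4}-\d{2}-\d{2} - ` the minimum input length works out to 4+1+2+1+2+3 = 13 bytes.
["source","go"]
----------------------------------------------------------------------
package main

import (
	"bytes"
	"fmt"
)

// numDate is a simplified stand-in for the prefix/digits/seps/suffix layout;
// the names here are illustrative, not the libbeat types.
type numDate struct {
	prefix []byte
	digits []int    // width of each digit run, e.g. 4, 2, 2
	seps   [][]byte // literal separators between digit runs
	suffix []byte   // optional literal after the last digit run
}

func (m *numDate) match(in []byte) bool {
	if !bytes.HasPrefix(in, m.prefix) {
		return false
	}
	pos := len(m.prefix)
	for i, width := range m.digits {
		if pos+width > len(in) {
			return false
		}
		for _, b := range in[pos : pos+width] {
			if b < '0' || b > '9' {
				return false
			}
		}
		pos += width
		if i < len(m.seps) {
			if !bytes.HasPrefix(in[pos:], m.seps[i]) {
				return false
			}
			pos += len(m.seps[i])
		}
	}
	// The suffix check mirrors the added branch above: the remaining input
	// must start with the captured literal (an empty suffix always passes).
	return bytes.HasPrefix(in[pos:], m.suffix)
}

func main() {
	// Roughly equivalent to the test pattern ^\d{4}-\d{2}-\d{2} -
	m := &numDate{
		digits: []int{4, 2, 2},
		seps:   [][]byte{[]byte("-"), []byte("-")},
		suffix: []byte(" - "),
	}
	fmt.Println(m.match([]byte("2017-01-02 - should match")))     // true
	fmt.Println(m.match([]byte("- 2017-01-02 should not match"))) // false
}
----------------------------------------------------------------------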

View File

@ -11,6 +11,7 @@ var transformations = []trans{
trimRight,
unconcat,
concatRepetition,
flattenRepetition,
}
// optimize runs minimal regular expression optimizations
@ -112,8 +113,8 @@ func unconcat(r *syntax.Regexp) (bool, *syntax.Regexp) {
return false, r
}
// concatRepetition concatenates multiple repeated sub-patterns into
// a repetition of exactly N.
// concatRepetition concatenates 2 consecutive repeated sub-patterns into a
// repetition of length 2.
func concatRepetition(r *syntax.Regexp) (bool, *syntax.Regexp) {
if r.Op != syntax.OpConcat {
@ -204,3 +205,54 @@ func concatRepetition(r *syntax.Regexp) (bool, *syntax.Regexp) {
}
return changed, r
}
// flattenRepetition flattens nested repetitions
func flattenRepetition(r *syntax.Regexp) (bool, *syntax.Regexp) {
if r.Op != syntax.OpConcat {
// don't iterate sub-expressions if top-level is not OpConcat
return false, r
}
sub := r.Sub
inRepetition := false
if isConcatRepetition(r) {
sub = sub[:1]
inRepetition = true
// create flattened regex repetition multiplying count
// if nested expression is also a repetition
if s := sub[0]; isConcatRepetition(s) {
count := len(s.Sub) * len(r.Sub)
return true, &syntax.Regexp{
Op: syntax.OpRepeat,
Sub: s.Sub[:1],
Min: count,
Max: count,
Flags: r.Flags | s.Flags,
}
}
}
// recursively check if we can flatten sub-expressions
changed := false
for i, s := range sub {
upd, tmp := flattenRepetition(s)
changed = changed || upd
sub[i] = tmp
}
if !changed {
return false, r
}
// fix up top-level repetition with modified one
tmp := *r
if inRepetition {
for i := range r.Sub {
tmp.Sub[i] = sub[0]
}
} else {
tmp.Sub = sub
}
return changed, &tmp
}
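An illustrative check of the intent behind the count multiplication, using the standard regexp package rather than the matcher pipeline: a nested repetition such as `(\d{2}){2}` accepts the same date prefixes as the flattened `\d{4}` it is rewritten to.
["source","go"]
----------------------------------------------------------------------
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The nested form repeats a two-digit group twice; flattening multiplies
	// the counts into a single four-digit repetition.
	nested := regexp.MustCompile(`^(\d{2}){2}-\d{2}-\d{2}`)
	flat := regexp.MustCompile(`^\d{4}-\d{2}-\d{2}`)

	for _, s := range []string{
		"2017-01-02 should match",
		"- 2017-01-02 should not match",
	} {
		fmt.Printf("%-32q nested=%v flat=%v\n", s, nested.MatchString(s), flat.MatchString(s))
	}
}
----------------------------------------------------------------------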

View File

@ -28,9 +28,11 @@ https://github.com/YaSuenag/hsbeat[hsbeat]:: Reads all performance counters in J
https://github.com/christiangalsterer/httpbeat[httpbeat]:: Polls multiple HTTP(S) endpoints and sends the data to
Logstash or Elasticsearch. Supports all HTTP methods and proxies.
https://github.com/jasperla/hwsensorsbeat[hwsensorsbeat]:: Reads sensors information from OpenBSD.
https://github.com/icinga/icingabeat[icingabeat]:: Icingabeat ships events and states from Icinga 2 to Elasticsearch or Logstash.
https://github.com/devopsmakers/iobeat[iobeat]:: Reads IO stats from /proc/diskstats on Linux.
https://github.com/radoondas/jmxproxybeat[jmxproxybeat]:: Reads Tomcat JMX metrics exposed over 'JMX Proxy Servlet' to HTTP.
https://github.com/mheese/journalbeat[journalbeat]:: Used for log shipping from systemd/journald based Linux systems.
https://github.com/dearcode/kafkabeat[kafkabeat]:: Reads data from Kafka using consumer groups.
https://github.com/eskibars/lmsensorsbeat[lmsensorsbeat]:: Collects data from lm-sensors (such as CPU temperatures, fan speeds, and voltages from i2c and smbus).
https://github.com/consulthys/logstashbeat[logstashbeat]:: Collects data from Logstash monitoring API (v5 onwards) and indexes them in Elasticsearch.
https://github.com/yedamao/mcqbeat[mcqbeat]:: Reads the status of queues from memcacheq.
@ -39,6 +41,7 @@ https://github.com/adibendahan/mysqlbeat[mysqlbeat]:: Run any query on MySQL and
https://github.com/PhaedrusTheGreek/nagioscheckbeat[nagioscheckbeat]:: For Nagios checks and performance data.
https://github.com/mrkschan/nginxbeat[nginxbeat]:: Reads status from Nginx.
https://github.com/2Fast2BCn/nginxupstreambeat[nginxupstreambeat]:: Reads upstream status from nginx upstream module.
https://github.com/deepujain/nvidiagpubeat/[nvidiagpubeat]:: Uses nvidia-smi to grab metrics of NVIDIA GPUs.
https://github.com/aristanetworks/openconfigbeat[openconfigbeat]:: Streams data from http://openconfig.net[OpenConfig]-enabled network devices
https://github.com/joehillen/packagebeat[packagebeat]:: Collects information about system packages from package
managers.
@ -57,7 +60,7 @@ https://github.com/hartfordfive/udplogbeat[udplogbeat]:: Accept events via local
https://github.com/cleesmith/unifiedbeat[unifiedbeat]:: Reads records from Unified2 binary files generated by
network intrusion detection software and indexes the records in Elasticsearch.
https://github.com/mrkschan/uwsgibeat[uwsgibeat]:: Reads stats from uWSGI.
https://github.com/eskibars/wmibeat[wmibeat]:: Uses WMI to grab your favorite, configurable Windows metrics.
https://github.com/eskibars/wmibeat[wmibeat]:: Uses WMI to grab your favorite, configurable Windows metrics.
Have you created a Beat that's not listed? If so, add the name and description of your Beat to the source document for

View File

@ -199,8 +199,8 @@ field references `[fieldname]`. Optional default values can be specified in case
field name is missing from the event.
You can also format time stored in the
`@timestamp` field using the `+FORMAT` syntax where FORMAT is a valid (time
format)[https://godoc.org/github.com/elastic/beats/libbeat/common/dtfmt].
`@timestamp` field using the `+FORMAT` syntax where FORMAT is a valid https://godoc.org/github.com/elastic/beats/libbeat/common/dtfmt[time
format].
["source","yaml",subs="attributes"]
------------------------------------------------------------------------------
@ -375,44 +375,5 @@ individual settings can be overwritten using `-E <setting>=<value>`.
[[config-file-format-tips]]
=== YAML Tips and Gotchas
When you edit the configuration file, there are a few things that you should know.
[float]
==== Use Spaces for Indentation
Indentation is meaningful in YAML. Make sure that you use spaces, rather than
tab characters, to indent sections.
In the default configuration files and in all the examples in the documentation,
we use 2 spaces per indentation level. We recommend you do the same.
[float]
==== Look at the Default Config File for Structure
The best way to understand where to define a configuration option is by looking
at the provided sample configuration files. The configuration files contain most
of the default configurations that are available per beat. To change a setting,
simply uncomment the line and change the values.
[float]
==== Test Your Config File
You can test your configuration file to verify that the structure is valid.
Simply change to the directory where the binary is installed, and run
your Beat in the foreground with the `-configtest` flag specified. For example:
["source","yaml",subs="attributes,callouts"]
----------------------------------------------------------------------
filebeat -c filebeat.yml -configtest
----------------------------------------------------------------------
You'll see a message if an error in the configuration file is found.
[float]
==== Wrap Regular Expressions in Single Quotation Marks
If you need to specify a regular expression in a YAML file, it's a good idea to
wrap the regular expression in single quotation marks to work around YAML's
tricky rules for string escaping.
For more information about YAML, see http://yaml.org/.
:allplatforms:
include::yaml.asciidoc[]

View File

@ -101,7 +101,7 @@ pattern is selected to see {beatname_uc} data.
image:./images/kibana-created-indexes.png[Discover tab with index selected]
To open the loaded dashboards, go to the *Dashboard* page and click *Open*.
Select the dashboard that you want to open.
To open the loaded dashboards, go to the *Dashboard* page and select the
dashboard that you want to open.
image:./images/kibana-navigation-vis.png[Navigation widget in Kibana]

View File

@ -11,22 +11,24 @@
//////////////////////////////////////////////////////////////////////////
[[configuration-dashboards]]
=== Dashboards Configuration
=== Dashboards
beta[]
The `dashboards` section of the +{beatname_lc}.yml+ config file contains options
for the automatic loading of the sample Beats dashboards. The loading of the
dashboards is disabled by default, but can be enabled either from the configuration
for automatically loading the sample Beats dashboards. Automatic dashboard
loading is disabled by default, but can be enabled either from the configuration
file or by using the `-setup` CLI flag.
If dashboard loading is enabled, {beatname_uc} attempts to configure Kibana by
writing directly in the Elasticsearch index for the Kibana configuration (by
default, `.kibana`). To connect to Elasticsearch, it uses the settings defined
in the Eleasticsearch output. If the Elasticsearch output is not configured or
not enabled, {beatname_uc} will stop with an error. Loading the dashboards is
only attempted at the Beat start, if Elasticsearch is not available when the
Beat starts, {beatname_uc} will stop with an error.
default, `.kibana`). To connect to Elasticsearch, {beatname_uc} uses the
settings defined in the Elasticsearch output. If the Elasticsearch output is
not configured or not enabled, {beatname_uc} will stop with an error. Dashboard
loading is only attempted at Beat startup. If Elasticsearch is not available when
the Beat starts, {beatname_uc} will stop with an error.
Here is an example configuration:
[source,yaml]
------------------------------------------------------------------------------
@ -40,48 +42,64 @@ You can specify the following options in the `dashboards` section of the
===== enabled
If enabled, load the sample Kibana dashboards on startup. If no other options
are set, the dashboards archive is downloaded from the elastic.co website.
If this option is set to true, {beatname_uc} loads the sample Kibana dashboards
automatically on startup. If no other options are set, the dashboard archive is
downloaded from the elastic.co website.
To load dashboards from a different location, you can
configure one of the following options: <<url-option,`url`>>,
<<directory-option,`directory`>>, or <<file-option,`file`>>.
To load dashboards from a snapshot URL, use the <<snapshot-option,`snapshot`>>
option and optionally <<snapshot-url-option,`snapshot-url`>>.
[[url-option]]
===== url
The URL from where to download the dashboards archive. By default this URL has a
value which is computed based on the Beat name and version. For released
versions, this URL points to the dashboard archive on the artifacts.elastic.co
The URL to use for downloading the dashboard archive. By default this URL
is computed based on the Beat name and version. For released versions,
this URL points to the dashboard archive on the artifacts.elastic.co
website.
[[directory-option]]
===== directory
The directory from where to read the dashboards. It is used instead of the URL
when it has a value.
The directory that contains the dashboards to load. If this option is set,
{beatname_uc} looks for dashboards in the specified directory instead of
downloading an archive from a URL.
[[file-option]]
===== file
The file archive (zip file) from where to read the dashboards. It is used
instead of the URL when it has a value.
The file archive (zip file) that contains the dashboards to load. If this option
is set, {beatname_uc} looks for a dashboard archive in the specified path
instead of downloading the archive from a URL.
[[snapshot-option]]
===== snapshot
If this option is set to true, the snapshot URL is used instead of the default
URL.
[[snapshot-url-option]]
===== snapshot_url
The URL from where to download the snapshot version of the dashboards. By
default this has a value which is computed based on the Beat name and version.
The URL to use for downloading the snapshot version of the dashboards. By
default the snapshot URL is computed based on the Beat name and version.
===== beat
In case the archive contains the dashboards from multiple Beats, this lets you
select which one to load. You can load all the dashboards in the archive by
setting this to the empty string. The default is "{beatname_lc}".
In case the archive contains the dashboards for multiple Beats, this setting
lets you select the Beat for which you want to load dashboards. To load all the
dashboards in the archive, set this option to an empty string. The default is
+"{beatname_lc}"+.
===== kibana_index
The name of the Kibana index to use for setting the configuration. Default is
".kibana"
The name of the Kibana index to use for setting the configuration. The default
is `".kibana"`
===== index
The Elasticsearch index name. This overwrites the index name defined in the
dashboards and index pattern. Example: "testbeat-*"
The Elasticsearch index name. This setting overwrites the index name defined
in the dashboards and index pattern. Example: `"testbeat-*"`

View File

@ -11,7 +11,7 @@
//////////////////////////////////////////////////////////////////////////
[[configuration-general]]
=== General Configuration
=== General
The general section of the +{beatname_lc}.yml+ config file contains configuration options for the Beat and some
general settings that control its behaviour.

View File

@ -39,6 +39,14 @@ mac>> for OS X, and <<win, win>> for Windows):
[[deb]]*deb:*
ifeval::["{release-state}"=="unreleased"]
Version {stack-version} of Elasticsearch has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","sh",subs="attributes,callouts"]
----------------------------------------------------------------------
sudo apt-get install openjdk-8-jre
@ -47,8 +55,18 @@ sudo dpkg -i elasticsearch-{ES-version}.deb
sudo /etc/init.d/elasticsearch start
----------------------------------------------------------------------
endif::[]
[[rpm]]*rpm:*
ifeval::["{release-state}"=="unreleased"]
Version {stack-version} of Elasticsearch has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","sh",subs="attributes,callouts"]
----------------------------------------------------------------------
sudo yum install java-1.8.0-openjdk
@ -57,8 +75,18 @@ sudo rpm -i elasticsearch-{ES-version}.rpm
sudo service elasticsearch start
----------------------------------------------------------------------
endif::[]
[[mac]]*mac:*
ifeval::["{release-state}"=="unreleased"]
Version {stack-version} of Elasticsearch has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","sh",subs="attributes,callouts"]
----------------------------------------------------------------------
# install Java, e.g. from: https://www.java.com/en/download/manual.jsp
@ -68,8 +96,18 @@ cd elasticsearch-{ES-version}
./bin/elasticsearch
----------------------------------------------------------------------
endif::[]
[[win]]*win:*
ifeval::["{release-state}"=="unreleased"]
Version {stack-version} of Elasticsearch has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
. If necessary, download and install the latest version of the Java from https://www.java.com[www.java.com].
. Download the Elasticsearch {ES-version} Windows zip file from the
@ -91,6 +129,8 @@ cd C:\Program Files\elasticsearch-{ES-version}
bin\elasticsearch.bat
----------------------------------------------------------------------
endif::[]
You can learn more about installing, configuring, and running Elasticsearch in the
https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html[Elasticsearch Reference].
@ -147,6 +187,14 @@ with your system:
*deb:*
ifeval::["{release-state}"=="unreleased"]
Version {stack-version} of Logstash has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","sh",subs="attributes,callouts"]
----------------------------------------------------------------------
sudo apt-get install openjdk-8-jre
@ -154,8 +202,18 @@ curl -L -O https://artifacts.elastic.co/downloads/logstash/logstash-{LS-version}
sudo dpkg -i logstash-{LS-version}.deb
----------------------------------------------------------------------
endif::[]
*rpm:*
ifeval::["{release-state}"=="unreleased"]
Version {stack-version} of Logstash has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","sh",subs="attributes,callouts"]
----------------------------------------------------------------------
sudo yum install java-1.8.0-openjdk
@ -163,8 +221,18 @@ curl -L -O https://artifacts.elastic.co/downloads/logstash/logstash-{LS-version}
sudo rpm -i logstash-{LS-version}.rpm
----------------------------------------------------------------------
endif::[]
*mac:*
ifeval::["{release-state}"=="unreleased"]
Version {stack-version} of Logstash has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","sh",subs="attributes,callouts"]
----------------------------------------------------------------------
# install Java, e.g. from: https://www.java.com/en/download/manual.jsp
@ -172,8 +240,18 @@ curl -L -O https://artifacts.elastic.co/downloads/logstash/logstash-{LS-version}
unzip logstash-{LS-version}.zip
----------------------------------------------------------------------
endif::[]
*win:*
ifeval::["{release-state}"=="unreleased"]
Version {stack-version} of Logstash has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
. If necessary, download and install the latest version of the Java from https://www.java.com[www.java.com].
. Download the Logstash {LS-version} Windows zip file from the
@ -183,13 +261,14 @@ https://www.elastic.co/downloads/logstash[downloads page].
Don't start Logstash yet. You need to set a couple of configuration options first.
endif::[]
[[logstash-setup]]
==== Setting Up Logstash
In this setup, the Beat sends events to Logstash. Logstash receives
these events by using the
https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html[Beats input plugin for Logstash] and then sends the transaction to Elasticsearch by using the
http://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html[Elasticsearch
these events by using the {logstashdoc}/plugins-inputs-beats.html[Beats input plugin for Logstash] and then sends the transaction to Elasticsearch by using the
{logstashdoc}/plugins-outputs-elasticsearch.html[Elasticsearch
output plugin for Logstash]. The Elasticsearch output plugin uses the bulk API, making
indexing very efficient.
@ -225,6 +304,7 @@ and to index into Elasticsearch. You configure Logstash by creating a
configuration file. For example, you can save the following example configuration
to a file called `logstash.conf`:
+
--
[source,ruby]
------------------------------------------------------------------------------
input {
@ -237,15 +317,22 @@ output {
elasticsearch {
hosts => "localhost:9200"
manage_template => false
index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
document_type => "%{[@metadata][type]}"
index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" <1>
document_type => "%{[@metadata][type]}" <2>
}
}
------------------------------------------------------------------------------
+
<1> `%{[@metadata][beat]}` sets the first part of the index name to the value
of the `beat` metadata field, and `%{+YYYY.MM.dd}` sets the second part of the
name to a date based on the Logstash `@timestamp` field. For example:
+{beatname_lc}-2017.03.29+.
<2> `%{[@metadata][type]}` sets the document type based on the value of the `type`
metadata field.
Logstash uses this configuration to index events in Elasticsearch in the same
way that the Beat would, but you get additional buffering and other capabilities
provided by Logstash.
--
To use this setup, you'll also need to configure your Beat to use Logstash. For more information, see the documentation for the Beat.
@ -334,6 +421,14 @@ Use the following commands to download and run Kibana.
*deb or rpm:*
ifeval::["{release-state}"=="unreleased"]
Version {stack-version} of Kibana has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","sh",subs="attributes,callouts"]
----------------------------------------------------------------------
curl -L -O https://artifacts.elastic.co/downloads/kibana/kibana-{Kibana-version}-linux-x86_64.tar.gz
@ -342,8 +437,18 @@ cd kibana-{Kibana-version}-linux-x86_64/
./bin/kibana
----------------------------------------------------------------------
endif::[]
*mac:*
ifeval::["{release-state}"=="unreleased"]
Version {stack-version} of Kibana has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","sh",subs="attributes,callouts"]
----------------------------------------------------------------------
curl -L -O https://artifacts.elastic.co/downloads/kibana/kibana-{Kibana-version}-darwin-x86_64.tar.gz
@ -352,8 +457,18 @@ cd kibana-{Kibana-version}-darwin-x86_64/
./bin/kibana
----------------------------------------------------------------------
endif::[]
*win:*
ifeval::["{release-state}"=="unreleased"]
Version {stack-version} of Kibana has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
. Download the Kibana {Kibana-version} Windows zip file from the
https://www.elastic.co/downloads/kibana[downloads page].
@ -374,6 +489,8 @@ cd C:\Program Files\kibana-{Kibana-version}-windows
bin\kibana.bat
----------------------------------------------------------------------
endif::[]
You can find Kibana binaries for other operating systems on the
https://www.elastic.co/downloads/kibana[Kibana downloads page].

View File

@ -9,6 +9,7 @@ include::./version.asciidoc[]
:winlogbeat: http://www.elastic.co/guide/en/beats/winlogbeat/{doc-branch}
:heartbeat: http://www.elastic.co/guide/en/beats/heartbeat/{doc-branch}
:securitydoc: https://www.elastic.co/guide/en/x-pack/5.2
:logstashdoc: https://www.elastic.co/guide/en/logstash/{doc-branch}
:beatname_lc: beatname
:beatname_uc: a Beat
:security: X-Pack Security
@ -33,14 +34,10 @@ include::./upgrading.asciidoc[]
include::./config-file-format.asciidoc[]
pass::[<?page_header Always refer to the documentation in master for the latest information about contributing to Beats.?>]
include::./newbeat.asciidoc[]
include::./event-conventions.asciidoc[]
include::./newdashboards.asciidoc[]
pass::[<?page_header ?>]
include::./release.asciidoc[]

View File

@ -11,7 +11,7 @@
//////////////////////////////////////////////////////////////////////////
[[configuration-logging]]
=== Logging Configuration
=== Logging
The `logging` section of the +{beatname_lc}.yml+ config file contains options
for configuring the Beats logging output. The logging system can write logs to

View File

@ -188,6 +188,8 @@ To compile the Beat, make sure you are in the Beat directory (`$GOPATH/src/githu
make
---------
NOTE: we don't support the `-j` option for make at the moment.
Running this command creates the binary called `countbeat` in `$GOPATH/src/github.com/{user}/countbeat`.
Now run the Beat:

View File

@ -11,7 +11,7 @@
//////////////////////////////////////////////////////////////////////////
[[elasticsearch-output]]
=== Elasticsearch Output Configuration
=== Elasticsearch Output
When you specify Elasticsearch for the output, the Beat sends the transactions directly to Elasticsearch by using the Elasticsearch HTTP API.
@ -353,33 +353,54 @@ See <<configuration-output-ssl>> for more information.
[[logstash-output]]
=== Logstash Output Configuration
=== Logstash Output
*Prerequisite:* To use Logstash as an output, you must
{libbeat}/logstash-installation.html#logstash-setup[install and configure] the Beats input
plugin for Logstash.
The Logstash output sends the events directly to Logstash by using the lumberjack
protocol, which runs over TCP. To use this option, you must
{libbeat}/logstash-installation.html#logstash-setup[install and configure] the Beats input
plugin for Logstash. Logstash allows for additional processing and routing of
protocol, which runs over TCP. Logstash allows for additional processing and routing of
generated events.
Every event sent to Logstash contains additional metadata for indexing and filtering:
Here is an example of how to configure {beatname_uc} to use Logstash:
[source,json]
["source","yaml",subs="attributes"]
------------------------------------------------------------------------------
output.logstash:
hosts: ["localhost:5044"]
------------------------------------------------------------------------------
==== Accessing Metadata Fields
Every event sent to Logstash contains the following metadata fields that you can
use in Logstash for indexing and filtering:
["source","json",subs="attributes"]
------------------------------------------------------------------------------
{
...
"@metadata": {
"beat": "<beat>",
"type": "<event type>"
"@metadata": { <1>
"beat": "{beatname_lc}", <2>
"type": "<event type>" <3>
}
}
------------------------------------------------------------------------------
<1> {beatname_uc} uses the `@metadata` field to send metadata to Logstash. The
contents of the `@metadata` field only exist in Logstash and are not part of any
events sent from Logstash. See the
{logstashdoc}/event-dependent-configuration.html#metadata[Logstash documentation]
for more about the `@metadata` field.
<2> The default is {beatname_lc}. To change this value, set the
<<logstash-index,`index`>> option in the {beatname_uc} config file.
<3> The value of `type` varies depending on the event type.
You can access this metadata from within the Logstash config file to set values
dynamically based on the contents of the metadata.
In Logstash, you can configure the Elasticsearch output plugin to use the
metadata and event type for indexing.
The following Logstash configuration file for the versions 2.x and 5.x sets Logstash to
use the index and document type reported by Beats for indexing events into Elasticsearch.
The index used will depend on the `@timestamp` field as identified by Logstash.
For example, the following Logstash configuration file for versions 2.x and
5.x sets Logstash to use the index and document type reported by Beats for
indexing events into Elasticsearch:
[source,logstash]
------------------------------------------------------------------------------
@ -393,24 +414,21 @@ input {
output {
elasticsearch {
hosts => ["http://localhost:9200"]
index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
document_type => "%{[@metadata][type]}"
index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" <1>
document_type => "%{[@metadata][type]}" <2>
}
}
------------------------------------------------------------------------------
<1> `%{[@metadata][beat]}` sets the first part of the index name to the value
of the `beat` metadata field, and `%{+YYYY.MM.dd}` sets the second part of the
name to a date based on the Logstash `@timestamp` field. For example:
+{beatname_lc}-2017.03.29+.
<2> `%{[@metadata][type]}` sets the document type based on the value of the `type`
metadata field.
Events indexed into Elasticsearch with the Logstash configuration shown here
will be similar to events directly indexed by Beats into Elasticsearch.
Here is an example of how to configure the Beat to use Logstash:
["source","yaml",subs="attributes"]
------------------------------------------------------------------------------
output.logstash:
hosts: ["localhost:5044"]
index: {beatname_lc}
------------------------------------------------------------------------------
==== Compatibility
This output works with all compatible versions of Logstash. See "Supported Beats Versions" in the https://www.elastic.co/support/matrix#show_compatibility[Elastic Support Matrix].
@ -510,6 +528,7 @@ The `proxy_use_local_resolver` option determines if Logstash hostnames are
resolved locally when using a proxy. The default value is false which means
that when a proxy is used the name resolution occurs on the proxy server.
[[logstash-index]]
===== index
The index root name to write events to. The default is the Beat name.
@ -556,7 +575,7 @@ Elasticsearch. Beats that publish data in batches (such as Filebeat) send events
spooler size.
[[kafka-output]]
=== Kafka Output Configuration
=== Kafka Output
The Kafka output sends the events to Apache Kafka.
@ -754,7 +773,7 @@ Configuration options for SSL parameters like the root CA for Kafka connections.
<<configuration-output-ssl>> for more information.
[[redis-output]]
=== Redis Output Configuration
=== Redis Output
The Redis output inserts the events into a Redis list or a Redis channel.
This output plugin is compatible with
@ -975,7 +994,7 @@ This option determines whether Redis hostnames are resolved locally when using a
The default value is false, which means that name resolution occurs on the proxy server.
[[file-output]]
=== File Output Configuration
=== File Output
The File output dumps the transactions into a file where each transaction is in a JSON format.
Currently, this output is used for testing, but it can be used as input for
@ -1030,7 +1049,7 @@ Output codec configuration. If the `codec` section is missing, events will be js
See <<configuration-output-codec>> for more information.
[[console-output]]
=== Console Output Configuration
=== Console Output
The Console output writes events in JSON format to stdout.
@ -1073,7 +1092,7 @@ Setting `bulk_max_size` to 0 disables buffering in libbeat.
[[configuration-output-ssl]]
=== SSL Configuration
=== SSL
You can specify SSL options for any output that supports SSL.
@ -1209,7 +1228,7 @@ The following elliptic curve types are available:
* P-521
[[configuration-output-codec]]
=== Output Codec Configuration
=== Output Codec
For outputs that do not require a specific encoding, you can change the encoding
by using the codec configuration. You can specify either the `json` or `format`

View File

@ -6,6 +6,7 @@
--
This section summarizes the changes in each release.
* <<release-notes-5.3.1>>
* <<release-notes-5.3.0>>
* <<release-notes-5.2.2>>
* <<release-notes-5.2.1>>

View File

@ -25,6 +25,14 @@ to sign all our packages. It is available from https://pgp.mit.edu.
[float]
==== APT
ifeval::["{release-state}"=="unreleased"]
Version {stack-version} of Beats has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
To add the Beats repository for APT:
. Download and install the Public Signing Key:
@ -62,6 +70,21 @@ the following:
Simply delete the `deb-src` entry from the `/etc/apt/sources.list` file, and the installation should work as expected.
==================================================
ifeval::["{beatname_uc}"=="Heartbeat"]
. On Debian or Ubuntu, pin the repository before installing to ensure that the
correct Elastic Heartbeat package is installed. To do this, edit
`/etc/apt/preferences` (or `/etc/apt/preferences.d/heartbeat`) as follows:
+
[source,shell]
--------------------------------------------------
Package: heartbeat
Pin: origin artifacts.elastic.co
Pin-Priority: 700
--------------------------------------------------
endif::[]
. Run `apt-get update`, and the repository is ready for use. For example, you can
install {beatname_uc} by running:
+
@ -77,9 +100,19 @@ sudo apt-get update && sudo apt-get install {beatname_lc}
sudo update-rc.d {beatname_lc} defaults 95 10
--------------------------------------------------
endif::[]
[float]
==== YUM
ifeval::["{release-state}"=="unreleased"]
Version {stack-version} of Beats has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
To add the Beats repository for YUM:
. Download and install the public signing key:
@ -118,3 +151,6 @@ sudo yum install {beatname_lc}
--------------------------------------------------
sudo chkconfig --add {beatname_lc}
--------------------------------------------------
endif::[]

View File

@ -50,7 +50,7 @@ To add the pipeline in Elasticsearch, you would run:
[source,shell]
------------------------------------------------------------------------------
curl -XPUT 'http://localhost:9200/_ingest/pipeline/test-pipeline' -d@pipeline.json
curl -H 'Content-Type: application/json' -XPUT 'http://localhost:9200/_ingest/pipeline/test-pipeline' -d@pipeline.json
------------------------------------------------------------------------------
Then in the +{beatname_lc}.yml+ file, you would specify:

View File

@ -21,6 +21,19 @@ You may encounter errors loading the config file on POSIX operating systems if:
See {libbeat}/config-file-permissions.html[Config File Ownership and Permissions]
for more about resolving these errors.
[float]
[[error-found-unexpected-character]]
=== Found Unexpected or Unknown Characters?
Either there is a problem with the structure of your config file, or you have
used a path or expression that the YAML parser cannot resolve because the config
file contains characters that aren't properly escaped.
If the YAML file contains paths with spaces or unusual characters, wrap the
paths in single quotation marks (see <<wrap-paths-in-quotes>>).
Also see the general advice under <<yaml-tips>>.
[float]
[[connection-problem]]
=== Logstash connection doesn't work?

View File

@ -9,6 +9,10 @@
//// include::../../libbeat/docs/shared-logstash-config.asciidoc[]
//////////////////////////////////////////////////////////////////////////
*Prerequisite:* To use Logstash as an output, you must also
{libbeat}/logstash-installation.html#logstash-setup[set up Logstash] to receive events
from Beats.
If you want to use Logstash to perform additional processing on the data collected by
{beatname_uc}, you need to configure {beatname_uc} to use Logstash.
@ -47,7 +51,4 @@ options specified: +.\winlogbeat.exe -c .\winlogbeat.yml -configtest -e+.
endif::win[]
To use this configuration, you must also
{libbeat}/logstash-installation.html#logstash-setup[set up Logstash] to receive events
from Beats.

View File

@ -11,7 +11,7 @@
//////////////////////////////////////////////////////////////////////////
[[configuration-path]]
=== Paths Configuration
=== Paths
The `path` section of the +{beatname_lc}.yml+ config file contains configuration
options that define where the Beat looks for its files. For example, all Beats

View File

@ -67,7 +67,7 @@ ifdef::allplatforms[]
["source","sh",subs="attributes,callouts"]
----------------------------------------------------------------------
curl -XPUT 'http://localhost:9200/_template/{beatname_lc}' -d@/etc/{beatname_lc}/{beatname_lc}.template.json
curl -H 'Content-Type: application/json' -XPUT 'http://localhost:9200/_template/{beatname_lc}' -d@/etc/{beatname_lc}/{beatname_lc}.template.json
----------------------------------------------------------------------
*mac:*
@ -75,7 +75,7 @@ curl -XPUT 'http://localhost:9200/_template/{beatname_lc}' -d@/etc/{beatname_lc}
["source","sh",subs="attributes,callouts"]
----------------------------------------------------------------------
cd {beatname_lc}-{version}-darwin-x86_64
curl -XPUT 'http://localhost:9200/_template/{beatname_lc}' -d@{beatname_lc}.template.json
curl -H 'Content-Type: application/json' -XPUT 'http://localhost:9200/_template/{beatname_lc}' -d@{beatname_lc}.template.json
----------------------------------------------------------------------
*win:*
@ -84,7 +84,7 @@ endif::allplatforms[]
["source","sh",subs="attributes,callouts"]
----------------------------------------------------------------------
PS C:\Program Files{backslash}{beatname_uc}> Invoke-WebRequest -Method Put -InFile {beatname_lc}.template.json -Uri http://localhost:9200/_template/{beatname_lc}?pretty
PS C:\Program Files{backslash}{beatname_uc}> Invoke-WebRequest -Method Put -InFile {beatname_lc}.template.json -Uri http://localhost:9200/_template/{beatname_lc}?pretty -ContentType application/json
----------------------------------------------------------------------
where `localhost:9200` is the IP and port where Elasticsearch is listening.

View File

@ -1,3 +1,4 @@
:stack-version: 5.3.0
:stack-version: 5.3.1
:doc-branch: 5.3
:go-version: 1.7.4
:release-state: released

View File

@ -9,31 +9,38 @@
//// include::../../libbeat/docs/yaml.asciidoc[]
//////////////////////////////////////////////////////////////////////////
ifdef::standalone[]
[[yaml-tips]]
== YAML Tips and Gotchas
The {beatname_uc} configuration file uses http://yaml.org/[YAML] for its syntax. When you edit the
endif::[]
The configuration file uses http://yaml.org/[YAML] for its syntax. When you edit the
file to modify configuration settings, there are a few things that you should know.
[float]
=== Use Spaces for Indentation
Indentation is meaningful in YAML. Make sure that you use spaces, rather than tab characters, to indent sections.
Indentation is meaningful in YAML. Make sure that you use spaces, rather than tab characters, to indent sections.
In the default configuration files and in all the examples in the documentation,
we use 2 spaces per indentation level. We recommend you do the same.
[float]
=== Look at the Default Config File for Structure
The best way to understand where to define a configuration option is by looking at
the {beatname_lc}.yml configuration file. The configuration file contains most of the
configuration options that are available for {beatname_uc}. To change a configuration setting,
simply uncomment the line and change the setting.
The best way to understand where to define a configuration option is by looking
at the provided sample configuration files. The configuration files contain most
of the default configurations that are available for the Beat. To change a setting,
simply uncomment the line and change the values.
[float]
=== Test Your Config File
You can test your configuration file to verify that the structure is valid.
Simply change to the directory where the binary is installed, and run
{beatname_uc} in the foreground with the `-configtest` flag specified. For example:
the Beat in the foreground with the `-configtest` flag specified. For example:
ifdef::allplatforms[]
@ -53,11 +60,33 @@ ifdef::win[]
endif::win[]
You'll see a message if {beatname_uc} finds an error in the file.
You'll see a message if the Beat finds an error in the file.
[float]
=== Wrap Regular Expressions in Single Quotation Marks
If you need to specify a regular expression in a YAML file, it's a good idea to wrap the regular expression in single quotation marks to work around YAML's tricky rules for string escaping.
For more information about YAML, see http://yaml.org/.
For more information about YAML, see http://yaml.org/.
[float]
[[wrap-paths-in-quotes]]
=== Wrap Paths in Single Quotation Marks
Windows paths in particular sometimes contain spaces or characters, such as drive
letters or triple dots, that may be misinterpreted by the YAML parser.
To avoid this problem, it's a good idea to wrap paths in single quotation marks.
[float]
[[avoid-leading-zeros]]
=== Avoid Using Leading Zeros in Numeric Values
If you use a leading zero (for example, `09`) in a numeric field without
wrapping the value in single quotation marks, the value may be interpreted
incorrectly by the YAML parser. If the value is a valid octal, it's converted
to an integer. If not, it's converted to a float.
To prevent unwanted type conversions, avoid using leading zeros in field values,
or wrap the values in single quotation marks.
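A minimal sketch of these two gotchas, assuming a YAML 1.1 parser such as gopkg.in/yaml.v2 (the keys below are made up for illustration): unquoted values with a leading zero are resolved as numbers, while quoted values and quoted Windows paths stay plain strings.
["source","go"]
----------------------------------------------------------------------
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	doc := []byte("octal: 010\n" + // valid octal, resolved as the integer 8
		"not_octal: 09\n" + // not valid octal, resolved as a float
		"quoted: '09'\n" + // stays a string
		"path: 'C:\\Program Files\\Example'\n") // quoted path stays a string

	var cfg map[string]interface{}
	if err := yaml.Unmarshal(doc, &cfg); err != nil {
		panic(err)
	}
	for _, key := range []string{"octal", "not_octal", "quoted", "path"} {
		fmt.Printf("%-10s %v (%T)\n", key, cfg[key], cfg[key])
	}
}
----------------------------------------------------------------------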

View File

@ -92,3 +92,32 @@
- name: meta.cloud.region
description: >
Region in which this host is running.
- key: kubernetes
title: Kubernetes info
description: >
Kubernetes metadata added by the kubernetes processor
fields:
- name: kubernetes.pod.name
type: keyword
description: >
Kubernetes pod name
- name: kubernetes.namespace
type: keyword
description: >
Kubernetes namespace
- name: kubernetes.labels
type: object
description: >
Kubernetes labels map
- name: kubernetes.annotations
type: object
description: >
Kubernetes annotations map
- name: kubernetes.container.name
type: keyword
description: >
Kubernetes container name

View File

@ -327,7 +327,7 @@ install-home:
if [ -d _meta/module.generated ]; then \
install -d -m 755 ${HOME_PREFIX}/module; \
rsync -av _meta/module.generated/ ${HOME_PREFIX}/module/; \
chmod -R go-w _meta/module.generated; \
chmod -R go-w ${HOME_PREFIX}/module/; \
fi
# Prepares for packaging. Builds binaries and creates homedir data

View File

@ -1,359 +0,0 @@
########################## Metricbeat Configuration ###########################
# This file is a full configuration example documenting all non-deprecated
# options in comments. For a shorter configuration example, that contains only
# the most common options, please see metricbeat.yml in the same directory.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/metricbeat/index.html
#============================ Config Reloading ===============================
# Config reloading allows you to dynamically load modules. Each file that is
# monitored must contain one or multiple modules as a list.
metricbeat.config.modules:
# Glob pattern for configuration reloading
path: ${path.config}/conf.d/*.yml
# Period on which files under path should be checked for changes
reload.period: 10s
# Set to true to enable config reloading
reload.enabled: false
#========================== Modules configuration ============================
metricbeat.modules:
#------------------------------- System Module -------------------------------
- module: system
metricsets:
# CPU stats
- cpu
# System Load stats
- load
# Per CPU core stats
#- core
# IO stats
#- diskio
# Per filesystem stats
- filesystem
# File system summary stats
- fsstat
# Memory stats
- memory
# Network stats
- network
# Per process stats
- process
# Sockets and connection info (linux only)
#- socket
enabled: true
period: 10s
processes: ['.*']
# if true, exports the CPU usage in ticks, together with the percentage values
#cpu_ticks: false
# If false, cmdline of a process is not cached.
#process.cmdline.cache.enabled: true
# Enable collection of cgroup metrics from processes on Linux.
#process.cgroups.enabled: true
# A list of regular expressions used to whitelist environment variables
# reported with the process metricset's events. Defaults to empty.
#process.env.whitelist: []
# Configure reverse DNS lookup on remote IP addresses in the socket metricset.
#socket.reverse_lookup.enabled: false
#socket.reverse_lookup.success_ttl: 60s
#socket.reverse_lookup.failure_ttl: 60s
#------------------------------- Apache Module -------------------------------
#- module: apache
#metricsets: ["status"]
#enabled: true
#period: 10s
# Apache hosts
#hosts: ["http://127.0.0.1"]
# Path to server status. Default server-status
#server_status_path: "server-status"
# Username of hosts. Empty by default
#username: test
# Password of hosts. Empty by default
#password: test123
#-------------------------------- ceph Module --------------------------------
#- module: ceph
# metricsets: ["cluster_disk", "cluster_health", "monitor_health", "pool_disk"]
# enabled: true
# period: 10s
# hosts: ["localhost:5000"]
#------------------------------ Couchbase Module -----------------------------
#- module: couchbase
#metricsets: ["cluster", "node", "bucket"]
#enabled: true
#period: 10s
#hosts: ["localhost:8091"]
#------------------------------- Docker Module -------------------------------
#- module: docker
#metricsets: ["container", "cpu", "diskio", "healthcheck", "info", "memory", "network"]
#hosts: ["unix:///var/run/docker.sock"]
#enabled: true
#period: 10s
# To connect to Docker over TLS you must specify a client and CA certificate.
#ssl:
#certificate_authority: "/etc/pki/root/ca.pem"
#certificate: "/etc/pki/client/cert.pem"
#key: "/etc/pki/client/cert.key"
#---------------------------- elasticsearch Module ---------------------------
#- module: elasticsearch
# metricsets: ["node", "node_stats", "stats"]
# enabled: true
# period: 10s
# hosts: ["localhost:9200"]
#------------------------------- golang Module -------------------------------
#- module: golang
# metricsets: ["expvar","heap"]
# enabled: true
# period: 10s
# hosts: ["localhost:6060"]
# heap.path: "/debug/vars"
# expvar:
# namespace: "example"
# path: "/debug/vars"
#------------------------------- HAProxy Module ------------------------------
#- module: haproxy
#metricsets: ["info", "stat"]
#enabled: true
#period: 10s
#hosts: ["tcp://127.0.0.1:14567"]
#------------------------------- Jolokia Module ------------------------------
#- module: jolokia
# metricsets: ["jmx"]
# enabled: true
# period: 10s
# hosts: ["localhost"]
# namespace: "metrics"
# path: "/jolokia/?ignoreErrors=true&canonicalNaming=false"
# jmx.mapping:
# jmx.application:
# jmx.instance:
#-------------------------------- kafka Module -------------------------------
#- module: kafka
#metricsets: ["partition"]
#enabled: true
#period: 10s
#hosts: ["localhost:9092"]
#client_id: metricbeat
#retries: 3
#backoff: 250ms
# List of Topics to query metadata for. If empty, all topics will be queried.
#topics: []
# Optional SSL. By default is off.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# SASL authentication
#username: ""
#password: ""
#------------------------------- kibana Module -------------------------------
- module: kibana
metricsets: ["status"]
enabled: true
period: 10s
hosts: ["localhost:5601"]
#------------------------------- kubelet Module ------------------------------
#- module: kubelet
# metricsets: ["node","container","volume","pod","system"]
# enabled: true
# period: 10s
# hosts: ["localhost:10255"]
#------------------------------ memcached Module -----------------------------
- module: memcached
metricsets: ["stats"]
enabled: true
period: 10s
hosts: ["localhost:11211"]
#------------------------------- MongoDB Module ------------------------------
#- module: mongodb
#metricsets: ["dbstats", "status"]
#enabled: true
#period: 10s
# The hosts must be passed as MongoDB URLs in the format:
# [mongodb://][user:pass@]host[:port].
# The username and password can also be set using the respective configuration
# options. The credentials in the URL take precedence over the username and
# password configuration options.
#hosts: ["localhost:27017"]
# Username to use when connecting to MongoDB. Empty by default.
#username: user
# Password to use when connecting to MongoDB. Empty by default.
#password: pass
#-------------------------------- MySQL Module -------------------------------
#- module: mysql
#metricsets: ["status"]
#enabled: true
#period: 10s
# Host DSN should be defined as "user:pass@tcp(127.0.0.1:3306)/"
# The username and password can either be set in the DSN or using the username
# and password config options. Those specified in the DSN take precedence.
#hosts: ["root:secret@tcp(127.0.0.1:3306)/"]
# Username of hosts. Empty by default.
#username: root
# Password of hosts. Empty by default.
#password: secret
# By setting raw to true, all raw fields from the status metricset will be added to the event.
#raw: false
#-------------------------------- Nginx Module -------------------------------
#- module: nginx
#metricsets: ["stubstatus"]
#enabled: true
#period: 10s
# Nginx hosts
#hosts: ["http://127.0.0.1"]
# Path to server status. Default server-status
#server_status_path: "server-status"
#------------------------------- php_fpm Module ------------------------------
#- module: php_fpm
#metricsets: ["pool"]
#enabled: true
#period: 10s
#status_path: "/status"
#hosts: ["localhost:8080"]
#----------------------------- PostgreSQL Module -----------------------------
#- module: postgresql
#metricsets:
# Stats about every PostgreSQL database
#- database
# Stats about the background writer process's activity
#- bgwriter
# Stats about every PostgreSQL process
#- activity
#enabled: true
#period: 10s
# The host must be passed as PostgreSQL URL. Example:
# postgres://localhost:5432?sslmode=disable
# The available parameters are documented here:
# https://godoc.org/github.com/lib/pq#hdr-Connection_String_Parameters
#hosts: ["postgres://localhost:5432"]
# Username to use when connecting to PostgreSQL. Empty by default.
#username: user
# Password to use when connecting to PostgreSQL. Empty by default.
#password: pass
#----------------------------- Prometheus Module -----------------------------
#- module: prometheus
#metricsets: ["stats"]
#enabled: true
#period: 10s
#hosts: ["localhost:9090"]
#metrics_path: /metrics
#namespace: example
#-------------------------------- Redis Module -------------------------------
#- module: redis
#metricsets: ["info", "keyspace"]
#enabled: true
#period: 10s
# Redis hosts
#hosts: ["127.0.0.1:6379"]
# Timeout after which time a metricset should return an error
# Timeout is by default defined as period, as a fetch of a metricset
# should never take longer than period, as otherwise calls can pile up.
#timeout: 1s
# Optional fields to be added to each event
#fields:
# datacenter: west
# Network type to be used for redis connection. Default: tcp
#network: tcp
# Max number of concurrent connections. Default: 10
#maxconn: 10
# Filters can be used to reduce the number of fields sent.
#filters:
# - include_fields:
# fields: ["stats"]
# Redis AUTH password. Empty by default.
#password: foobared
#------------------------------- Windows Module ------------------------------
#- module: windows
# metricsets: ["perfmon"]
# enabled: true
# period: 10s
# perfmon.counters:
#------------------------------ ZooKeeper Module -----------------------------
#- module: zookeeper
#metricsets: ["mntr"]
#enabled: true
#period: 10s
#hosts: ["localhost:2181"]

View File

@ -1,57 +0,0 @@
###################### Metricbeat Configuration Example #######################
# This file is an example configuration file highlighting only the most common
# options. The metricbeat.full.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/metricbeat/index.html
#========================== Modules configuration ============================
metricbeat.modules:
#------------------------------- System Module -------------------------------
- module: system
metricsets:
# CPU stats
- cpu
# System Load stats
- load
# Per CPU core stats
#- core
# IO stats
#- diskio
# Per filesystem stats
- filesystem
# File system summary stats
- fsstat
# Memory stats
- memory
# Network stats
- network
# Per process stats
- process
# Sockets (linux only)
#- socket
enabled: true
period: 10s
processes: ['.*']
#------------------------------- kibana Module -------------------------------
- module: kibana
metricsets: ["status"]
enabled: true
period: 10s
hosts: ["localhost:5601"]

View File

@ -1,13 +0,0 @@
{
"hits": 0,
"timeRestore": false,
"description": "",
"title": "CPU/Memory per container",
"uiStateJSON": "{\"P-2\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"P-4\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"P-5\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}}",
"panelsJSON": "[{\"col\":4,\"id\":\"Container-CPU-usage\",\"panelIndex\":2,\"row\":1,\"size_x\":9,\"size_y\":4,\"type\":\"visualization\"},{\"col\":1,\"id\":\"System-Navigation\",\"panelIndex\":3,\"row\":1,\"size_x\":3,\"size_y\":4,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Container-Memory-stats\",\"panelIndex\":4,\"row\":5,\"size_x\":12,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Container-Block-IO\",\"panelIndex\":5,\"row\":8,\"size_x\":12,\"size_y\":4,\"type\":\"visualization\"}]",
"optionsJSON": "{\"darkTheme\":false}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}"
}
}

View File

@ -1,13 +0,0 @@
{
"optionsJSON": "{\"darkTheme\":false}",
"timeRestore": false,
"description": "",
"hits": 0,
"title": "Metricbeat - Apache HTTPD server status",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}}}]}"
},
"version": 1,
"panelsJSON": "[{\"id\":\"Apache-HTTPD-CPU\",\"type\":\"visualization\",\"panelIndex\":1,\"size_x\":6,\"size_y\":3,\"col\":7,\"row\":10},{\"id\":\"Apache-HTTPD-Hostname-list\",\"type\":\"visualization\",\"panelIndex\":2,\"size_x\":3,\"size_y\":3,\"col\":1,\"row\":1},{\"id\":\"Apache-HTTPD-Load1-slash-5-slash-15\",\"type\":\"visualization\",\"panelIndex\":3,\"size_x\":6,\"size_y\":3,\"col\":1,\"row\":10},{\"id\":\"Apache-HTTPD-Scoreboard\",\"type\":\"visualization\",\"panelIndex\":4,\"size_x\":12,\"size_y\":3,\"col\":1,\"row\":7},{\"id\":\"Apache-HTTPD-Total-accesses-and-kbytes\",\"type\":\"visualization\",\"panelIndex\":5,\"size_x\":6,\"size_y\":3,\"col\":7,\"row\":1},{\"id\":\"Apache-HTTPD-Uptime\",\"type\":\"visualization\",\"panelIndex\":6,\"size_x\":3,\"size_y\":3,\"col\":4,\"row\":1},{\"id\":\"Apache-HTTPD-Workers\",\"type\":\"visualization\",\"panelIndex\":7,\"size_x\":12,\"size_y\":3,\"col\":1,\"row\":4}]",
"uiStateJSON": "{\"P-2\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}}"
}

View File

@ -1,13 +0,0 @@
{
"hits": 0,
"timeRestore": false,
"description": "",
"title": "Metricbeat Docker",
"uiStateJSON": "{\"P-1\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"asc\"}}}},\"P-3\":{\"vis\":{\"legendOpen\":true}},\"P-5\":{\"vis\":{\"legendOpen\":true}},\"P-7\":{\"vis\":{\"legendOpen\":true}}}",
"panelsJSON": "[{\"col\":1,\"id\":\"Docker-containers\",\"panelIndex\":1,\"row\":1,\"size_x\":7,\"size_y\":5,\"type\":\"visualization\"},{\"col\":8,\"id\":\"Docker-Number-of-Containers\",\"panelIndex\":2,\"row\":1,\"size_x\":5,\"size_y\":2,\"type\":\"visualization\"},{\"col\":8,\"id\":\"Docker-containers-per-host\",\"panelIndex\":3,\"row\":3,\"size_x\":2,\"size_y\":3,\"type\":\"visualization\"},{\"col\":10,\"id\":\"Docker-images-and-names\",\"panelIndex\":7,\"row\":3,\"size_x\":3,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Docker-CPU-usage\",\"panelIndex\":4,\"row\":6,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Docker-memory-usage\",\"panelIndex\":5,\"row\":6,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Docker-Network-IO\",\"panelIndex\":6,\"row\":9,\"size_x\":12,\"size_y\":3,\"type\":\"visualization\"}]",
"optionsJSON": "{\"darkTheme\":false}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}"
}
}

View File

@ -1,13 +0,0 @@
{
"hits": 0,
"timeRestore": false,
"description": "",
"title": "Metricbeat MongoDB",
"uiStateJSON": "{\"P-1\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}}",
"panelsJSON": "[{\"col\":1,\"id\":\"MongoDB-hosts\",\"panelIndex\":1,\"row\":1,\"size_x\":8,\"size_y\":3,\"type\":\"visualization\"},{\"col\":9,\"id\":\"MongoDB-Engine-ampersand-Version\",\"panelIndex\":4,\"row\":1,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"MongoDB-operation-counters\",\"panelIndex\":2,\"row\":4,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"id\":\"MongoDB-Concurrent-transactions-Read\",\"panelIndex\":6,\"row\":4,\"size_x\":3,\"size_y\":3,\"type\":\"visualization\"},{\"col\":10,\"id\":\"MongoDB-Concurrent-transactions-Write\",\"panelIndex\":7,\"row\":4,\"size_x\":3,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"MongoDB-memory-stats\",\"panelIndex\":5,\"row\":10,\"size_x\":12,\"size_y\":4,\"type\":\"visualization\"},{\"col\":7,\"id\":\"MongoDB-asserts\",\"panelIndex\":3,\"row\":7,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"id\":\"MongoDB-WiredTiger-Cache\",\"type\":\"visualization\",\"panelIndex\":8,\"size_x\":6,\"size_y\":3,\"col\":1,\"row\":7}]",
"optionsJSON": "{\"darkTheme\":false}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}"
}
}

View File

@ -1,13 +0,0 @@
{
"hits": 0,
"timeRestore": false,
"description": "",
"title": "Metricbeat: Redis",
"uiStateJSON": "{\"P-3\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"P-4\":{\"vis\":{\"legendOpen\":true}}}",
"panelsJSON": "[{\"col\":1,\"id\":\"Redis-Clients-Metrics\",\"panelIndex\":2,\"row\":1,\"size_x\":3,\"size_y\":3,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Redis-Connected-clients\",\"panelIndex\":1,\"row\":1,\"size_x\":5,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Redis-hosts\",\"panelIndex\":3,\"row\":4,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Redis-Server-Versions\",\"panelIndex\":4,\"row\":6,\"size_x\":4,\"size_y\":2,\"type\":\"visualization\"},{\"col\":5,\"id\":\"Redis-server-mode\",\"panelIndex\":5,\"row\":6,\"size_x\":4,\"size_y\":2,\"type\":\"visualization\"},{\"col\":9,\"id\":\"Redis-multiplexing-API\",\"panelIndex\":6,\"row\":6,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"id\":\"Redis-Keyspaces\",\"type\":\"visualization\",\"panelIndex\":7,\"size_x\":4,\"size_y\":3,\"col\":9,\"row\":1}]",
"optionsJSON": "{\"darkTheme\":false}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}"
}
}

View File

@ -1,13 +0,0 @@
{
"hits": 0,
"timeRestore": false,
"description": "",
"title": "Metricbeat-cpu",
"uiStateJSON": "{\"P-9\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}}",
"panelsJSON": "[{\"col\":1,\"id\":\"System-Navigation\",\"panelIndex\":2,\"row\":1,\"size_x\":2,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"CPU-usage-over-time\",\"panelIndex\":4,\"row\":4,\"size_x\":6,\"size_y\":5,\"type\":\"visualization\"},{\"col\":9,\"id\":\"System-load\",\"panelIndex\":6,\"row\":1,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"id\":\"System-Load-over-time\",\"panelIndex\":8,\"row\":4,\"size_x\":6,\"size_y\":5,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Top-hosts-by-CPU-usage\",\"panelIndex\":9,\"row\":9,\"size_x\":12,\"size_y\":5,\"type\":\"visualization\"},{\"col\":3,\"id\":\"CPU-Usage\",\"panelIndex\":10,\"row\":1,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"}]",
"optionsJSON": "{\"darkTheme\":false}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}"
}
}

View File

@ -1,13 +0,0 @@
{
"hits": 0,
"timeRestore": false,
"description": "",
"title": "Metricbeat filesystem per Host",
"uiStateJSON": "{\"P-1\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}}",
"panelsJSON": "[{\"col\":1,\"id\":\"Top-disks-by-memory-usage\",\"panelIndex\":1,\"row\":6,\"size_x\":12,\"size_y\":5,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Disk-utilization-over-time\",\"panelIndex\":2,\"row\":1,\"size_x\":6,\"size_y\":5,\"type\":\"visualization\"},{\"col\":1,\"id\":\"System-Navigation\",\"panelIndex\":3,\"row\":1,\"size_x\":3,\"size_y\":3,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Disk-space-distribution\",\"panelIndex\":5,\"row\":1,\"size_x\":3,\"size_y\":5,\"type\":\"visualization\"}]",
"optionsJSON": "{\"darkTheme\":false}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}"
}
}

View File

@ -1,13 +0,0 @@
{
"hits": 0,
"timeRestore": false,
"description": "",
"title": "Metricbeat-filesystem",
"uiStateJSON": "{\"P-5\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}}",
"panelsJSON": "[{\"col\":1,\"id\":\"System-Navigation\",\"panelIndex\":1,\"row\":1,\"size_x\":2,\"size_y\":4,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Top-hosts-by-disk-size\",\"panelIndex\":5,\"row\":10,\"size_x\":12,\"size_y\":4,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Disk-space-overview\",\"panelIndex\":6,\"row\":1,\"size_x\":9,\"size_y\":4,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Free-disk-space-over-days\",\"panelIndex\":7,\"row\":5,\"size_x\":6,\"size_y\":5,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Total-files-over-days\",\"panelIndex\":8,\"row\":5,\"size_x\":6,\"size_y\":5,\"type\":\"visualization\"}]",
"optionsJSON": "{\"darkTheme\":false}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}"
}
}

View File

@ -1,13 +0,0 @@
{
"hits": 0,
"timeRestore": false,
"description": "",
"title": "Metricbeat-memory",
"uiStateJSON": "{\"P-7\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"desc\"}}}}}",
"panelsJSON": "[{\"col\":1,\"id\":\"System-Navigation\",\"panelIndex\":1,\"row\":1,\"size_x\":2,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Top-hosts-by-memory-usage\",\"panelIndex\":7,\"row\":9,\"size_x\":12,\"size_y\":5,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Memory-usage-over-time\",\"panelIndex\":10,\"row\":4,\"size_x\":6,\"size_y\":5,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Swap-usage-over-time\",\"panelIndex\":11,\"row\":4,\"size_x\":6,\"size_y\":5,\"type\":\"visualization\"},{\"col\":3,\"id\":\"Total-Memory\",\"panelIndex\":12,\"row\":1,\"size_x\":2,\"size_y\":3,\"type\":\"visualization\"},{\"col\":5,\"id\":\"Available-Memory\",\"panelIndex\":13,\"row\":1,\"size_x\":2,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Memory-usage\",\"panelIndex\":14,\"row\":1,\"size_x\":3,\"size_y\":3,\"type\":\"visualization\"},{\"col\":10,\"id\":\"Swap-usage\",\"panelIndex\":15,\"row\":1,\"size_x\":3,\"size_y\":3,\"type\":\"visualization\"}]",
"optionsJSON": "{\"darkTheme\":false}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}"
}
}

View File

@ -1,13 +0,0 @@
{
"hits": 0,
"timeRestore": false,
"description": "",
"title": "Metricbeat-network",
"uiStateJSON": "{\"P-6\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}}",
"panelsJSON": "[{\"col\":1,\"id\":\"In-vs-Out-Network-Bytes\",\"panelIndex\":5,\"row\":4,\"size_x\":6,\"size_y\":5,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Top-10-interfaces\",\"panelIndex\":6,\"row\":9,\"size_x\":12,\"size_y\":6,\"type\":\"visualization\"},{\"col\":9,\"id\":\"Network-Packetloss\",\"panelIndex\":13,\"row\":1,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Packet-loss-on-interfaces\",\"panelIndex\":22,\"row\":4,\"size_x\":6,\"size_y\":5,\"type\":\"visualization\"},{\"col\":1,\"id\":\"System-Navigation\",\"panelIndex\":23,\"row\":1,\"size_x\":2,\"size_y\":3,\"type\":\"visualization\"},{\"col\":3,\"id\":\"Network-Bytes\",\"panelIndex\":24,\"row\":1,\"size_x\":5,\"size_y\":3,\"type\":\"visualization\"}]",
"optionsJSON": "{\"darkTheme\":false}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}"
}
}

View File

@ -1,13 +0,0 @@
{
"hits": 0,
"timeRestore": false,
"description": "",
"title": "Metricbeat-overview",
"uiStateJSON": "{\"P-1\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}}",
"panelsJSON": "[{\"id\":\"Servers-overview\",\"type\":\"visualization\",\"panelIndex\":1,\"size_x\":9,\"size_y\":5,\"col\":4,\"row\":1},{\"id\":\"System-Navigation\",\"type\":\"visualization\",\"panelIndex\":2,\"size_x\":3,\"size_y\":4,\"col\":1,\"row\":1}]",
"optionsJSON": "{\"darkTheme\":false}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}}}]}"
}
}

View File

@ -1,13 +0,0 @@
{
"hits": 0,
"timeRestore": false,
"description": "",
"title": "Metricbeat-processes",
"uiStateJSON": "{\"P-1\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"P-4\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}}",
"panelsJSON": "[{\"col\":1,\"id\":\"System-Navigation\",\"panelIndex\":5,\"row\":1,\"size_x\":3,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Number-of-processes\",\"panelIndex\":7,\"row\":4,\"size_x\":3,\"size_y\":3,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Process-state-by-host\",\"panelIndex\":9,\"row\":1,\"size_x\":5,\"size_y\":3,\"type\":\"visualization\"},{\"col\":9,\"id\":\"Number-of-processes-by-host\",\"panelIndex\":8,\"row\":1,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"CPU-usage-per-process\",\"panelIndex\":2,\"row\":7,\"size_x\":6,\"size_y\":8,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Memory-usage-per-process\",\"panelIndex\":3,\"row\":7,\"size_x\":6,\"size_y\":8,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Top-processes-by-memory-usage\",\"panelIndex\":1,\"row\":15,\"size_x\":6,\"size_y\":11,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Top-processes-by-CPU-usage\",\"panelIndex\":4,\"row\":15,\"size_x\":6,\"size_y\":11,\"type\":\"visualization\"},{\"id\":\"Number-of-processes-over-time\",\"type\":\"visualization\",\"panelIndex\":10,\"size_x\":9,\"size_y\":3,\"col\":4,\"row\":4}]",
"optionsJSON": "{\"darkTheme\":false}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}"
}
}

View File

@ -1,13 +0,0 @@
{
"hits": 0,
"timeRestore": false,
"description": "",
"title": "Metricbeat system overview",
"uiStateJSON": "{\"P-14\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}}",
"panelsJSON": "[{\"col\":1,\"id\":\"Network-Bytes\",\"panelIndex\":2,\"row\":6,\"size_x\":8,\"size_y\":2,\"type\":\"visualization\"},{\"col\":9,\"id\":\"Network-Packetloss\",\"panelIndex\":3,\"row\":6,\"size_x\":4,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"System-Navigation\",\"panelIndex\":9,\"row\":1,\"size_x\":3,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Total-Memory\",\"panelIndex\":11,\"row\":4,\"size_x\":2,\"size_y\":2,\"type\":\"visualization\"},{\"col\":3,\"id\":\"Available-Memory\",\"panelIndex\":12,\"row\":4,\"size_x\":2,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"System-overview-by-host\",\"panelIndex\":14,\"row\":8,\"size_x\":12,\"size_y\":6,\"type\":\"visualization\"},{\"col\":5,\"id\":\"System-load\",\"panelIndex\":15,\"row\":1,\"size_x\":8,\"size_y\":3,\"type\":\"visualization\"},{\"col\":5,\"id\":\"CPU-Usage\",\"panelIndex\":16,\"row\":4,\"size_x\":8,\"size_y\":2,\"type\":\"visualization\"}]",
"optionsJSON": "{\"darkTheme\":false}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}"
}
}

View File

@ -1,16 +0,0 @@
{
"description": "",
"hits": 0,
"columns": [
"_source"
],
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"metricbeat-*\",\"query\":{\"query_string\":{\"query\":\"metricset.module: apache\",\"analyze_wildcard\":true}},\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647}}"
},
"sort": [
"@timestamp",
"desc"
],
"title": "Apache HTTPD",
"version": 1
}
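
The saved searches removed here store their index pattern and query inside searchSourceJSON. For context, the sketch below shows a roughly equivalent Elasticsearch request for the query_string of the search above; the host and port assume a default local Elasticsearch install, and the size value is an arbitrary choice for the example.

package main

import (
    "bytes"
    "fmt"
    "io"
    "log"
    "net/http"
)

func main() {
    // The query_string and sort are taken from the saved search above;
    // the Elasticsearch URL and the size are assumptions for this example.
    body := []byte(`{
      "query": {
        "query_string": {
          "query": "metricset.module: apache",
          "analyze_wildcard": true
        }
      },
      "sort": [ { "@timestamp": { "order": "desc" } } ],
      "size": 10
    }`)

    resp, err := http.Post("http://localhost:9200/metricbeat-*/_search",
        "application/json", bytes.NewReader(body))
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()

    out, err := io.ReadAll(resp.Body)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(out))
}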

View File

@ -1,16 +0,0 @@
{
"sort": [
"@timestamp",
"desc"
],
"hits": 0,
"description": "",
"title": "Cpu-Load stats",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"metricbeat-*\",\"query\":{\"query_string\":{\"query\":\"metricset.module: system AND (metricset.name: cpu OR metricset.name: load)\",\"analyze_wildcard\":true}},\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647}}"
},
"columns": [
"_source"
]
}

View File

@ -1,16 +0,0 @@
{
"sort": [
"@timestamp",
"desc"
],
"hits": 0,
"description": "",
"title": "Cpu stats",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"metricbeat-*\",\"query\":{\"query_string\":{\"query\":\"metricset.module: system AND metricset.name: cpu\",\"analyze_wildcard\":true}},\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647}}"
},
"columns": [
"_source"
]
}

View File

@ -1,16 +0,0 @@
{
"sort": [
"@timestamp",
"desc"
],
"hits": 0,
"description": "",
"title": "Filesystem stats",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"metricbeat-*\",\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647},\"query\":{\"query_string\":{\"query\":\"metricset.module: system AND metricset.name: filesystem\",\"analyze_wildcard\":true}}}"
},
"columns": [
"_source"
]
}

View File

@ -1,16 +0,0 @@
{
"sort": [
"@timestamp",
"desc"
],
"hits": 0,
"description": "",
"title": "Fsstats",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"metricbeat-*\",\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647},\"query\":{\"query_string\":{\"query\":\"metricset.module: system AND metricset.name: fsstat\",\"analyze_wildcard\":true}}}"
},
"columns": [
"_source"
]
}

View File

@ -1,16 +0,0 @@
{
"sort": [
"@timestamp",
"desc"
],
"hits": 0,
"description": "",
"title": "Load stats",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"metricbeat-*\",\"query\":{\"query_string\":{\"query\":\"metricset.module: system AND metricset.name: load\",\"analyze_wildcard\":true}},\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647}}"
},
"columns": [
"_source"
]
}

View File

@ -1,16 +0,0 @@
{
"sort": [
"@timestamp",
"desc"
],
"hits": 0,
"description": "",
"title": "Memory stats",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"metricbeat-*\",\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647},\"query\":{\"query_string\":{\"query\":\"metricset.module: system AND metricset.name: memory\",\"analyze_wildcard\":true}}}"
},
"columns": [
"_source"
]
}

View File

@ -1,16 +0,0 @@
{
"sort": [
"@timestamp",
"desc"
],
"hits": 0,
"description": "",
"title": "Metricbeat Docker",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"metricbeat-*\",\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647},\"query\":{\"query_string\":{\"query\":\"metricset.module:docker\",\"analyze_wildcard\":true}}}"
},
"columns": [
"_source"
]
}

View File

@ -1,16 +0,0 @@
{
"sort": [
"@timestamp",
"desc"
],
"hits": 0,
"description": "",
"title": "Metricbeat Redis",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"metricbeat-*\",\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647},\"query\":{\"query_string\":{\"query\":\"metricset.module:redis\",\"analyze_wildcard\":true}}}"
},
"columns": [
"_source"
]
}

View File

@ -1,16 +0,0 @@
{
"sort": [
"@timestamp",
"desc"
],
"hits": 0,
"description": "",
"title": "MongoDB search",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"metricbeat-*\",\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"metricset.module:mongodb\"}},\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647}}"
},
"columns": [
"_source"
]
}

View File

@ -1,16 +0,0 @@
{
"sort": [
"@timestamp",
"desc"
],
"hits": 0,
"description": "",
"title": "Network data",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"metricbeat-*\",\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"metricset.module: system AND metricset.name: network\"}},\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647}}"
},
"columns": [
"_source"
]
}

View File

@ -1,16 +0,0 @@
{
"sort": [
"@timestamp",
"desc"
],
"hits": 0,
"description": "",
"title": "Process stats",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"metricbeat-*\",\"query\":{\"query_string\":{\"query\":\"metricset.name: process\",\"analyze_wildcard\":true}},\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647},\"filter\":[]}"
},
"columns": [
"_source"
]
}

View File

@ -1,16 +0,0 @@
{
"sort": [
"@timestamp",
"desc"
],
"hits": 0,
"description": "",
"title": "System stats",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"metricbeat-*\",\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647},\"query\":{\"query_string\":{\"query\":\"metricset.module: system\",\"analyze_wildcard\":true}}}"
},
"columns": [
"_source"
]
}

View File

@ -1,11 +0,0 @@
{
"description": "",
"uiStateJSON": "{}",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\n \"filter\": []\n}"
},
"savedSearchId": "Apache-HTTPD",
"visState": "{\n \"title\": \"Apache HTTPD - CPU\",\n \"type\": \"line\",\n \"params\": {\n \"shareYAxis\": true,\n \"addTooltip\": true,\n \"addLegend\": true,\n \"showCircles\": true,\n \"smoothLines\": false,\n \"interpolate\": \"linear\",\n \"scale\": \"linear\",\n \"drawLinesBetweenPoints\": true,\n \"radiusRatio\": 9,\n \"times\": [],\n \"addTimeMarker\": false,\n \"defaultYExtents\": false,\n \"setYExtents\": false,\n \"yAxis\": {}\n },\n \"aggs\": [\n {\n \"id\": \"1\",\n \"type\": \"avg\",\n \"schema\": \"metric\",\n \"params\": {\n \"field\": \"apache.status.cpu.load\",\n \"customLabel\": \"CPU load\"\n }\n },\n {\n \"id\": \"2\",\n \"type\": \"date_histogram\",\n \"schema\": \"segment\",\n \"params\": {\n \"field\": \"@timestamp\",\n \"interval\": \"auto\",\n \"customInterval\": \"2h\",\n \"min_doc_count\": 1,\n \"extended_bounds\": {}\n }\n },\n {\n \"id\": \"3\",\n \"type\": \"terms\",\n \"schema\": \"split\",\n \"params\": {\n \"field\": \"apache.status.hostname\",\n \"size\": 5,\n \"order\": \"desc\",\n \"orderBy\": \"1\",\n \"row\": true\n }\n },\n {\n \"id\": \"4\",\n \"type\": \"avg\",\n \"schema\": \"metric\",\n \"params\": {\n \"field\": \"apache.status.cpu.user\",\n \"customLabel\": \"CPU user\"\n }\n },\n {\n \"id\": \"5\",\n \"type\": \"avg\",\n \"schema\": \"metric\",\n \"params\": {\n \"field\": \"apache.status.cpu.system\",\n \"customLabel\": \"CPU system\"\n }\n },\n {\n \"id\": \"6\",\n \"type\": \"avg\",\n \"schema\": \"metric\",\n \"params\": {\n \"field\": \"apache.status.cpu.children_user\",\n \"customLabel\": \"CPU children user\"\n }\n },\n {\n \"id\": \"7\",\n \"type\": \"avg\",\n \"schema\": \"metric\",\n \"params\": {\n \"field\": \"apache.status.cpu.children_system\",\n \"customLabel\": \"CPU children system\"\n }\n }\n ],\n \"listeners\": {}\n}",
"title": "Apache HTTPD - CPU",
"version": 1
}
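
Visualizations use the same pattern as the dashboards: the visState field is a serialized JSON document listing the chart type and its aggregations (here, averages of the apache.status.cpu.* fields split by time and hostname). A minimal Go sketch of pulling that aggregation list back out follows; the struct names and the local file name are assumptions, not part of any Kibana or Beats API.

package main

import (
    "encoding/json"
    "fmt"
    "log"
    "os"
)

// visualization mirrors the outer keys of the saved object above (illustrative only).
type visualization struct {
    Title         string `json:"title"`
    SavedSearchID string `json:"savedSearchId"`
    VisState      string `json:"visState"` // serialized JSON, like panelsJSON above
}

// visState and agg mirror the keys used inside the visState string.
type visState struct {
    Title string `json:"title"`
    Type  string `json:"type"`
    Aggs  []agg  `json:"aggs"`
}

type agg struct {
    ID     string `json:"id"`
    Type   string `json:"type"`
    Schema string `json:"schema"`
    Params struct {
        Field       string `json:"field"`
        CustomLabel string `json:"customLabel"`
    } `json:"params"`
}

func main() {
    // "Apache-HTTPD-CPU.json" is a placeholder for a local copy of the file above.
    raw, err := os.ReadFile("Apache-HTTPD-CPU.json")
    if err != nil {
        log.Fatal(err)
    }

    var v visualization
    if err := json.Unmarshal(raw, &v); err != nil {
        log.Fatal(err)
    }

    var vs visState
    if err := json.Unmarshal([]byte(v.VisState), &vs); err != nil {
        log.Fatal(err)
    }

    fmt.Printf("%s (%s, saved search %q)\n", vs.Title, vs.Type, v.SavedSearchID)
    for _, a := range vs.Aggs {
        fmt.Printf("  agg %s: %s/%s field=%s label=%q\n",
            a.ID, a.Type, a.Schema, a.Params.Field, a.Params.CustomLabel)
    }
}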

View File

@ -1,11 +0,0 @@
{
"description": "",
"uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[]}"
},
"savedSearchId": "Apache-HTTPD",
"visState": "{\"title\":\"Apache HTTPD - Hostname list\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null}},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"Events count\"}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"apache.status.hostname\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Apache HTTD Hostname\"}}],\"listeners\":{}}",
"title": "Apache HTTPD - Hostname list",
"version": 1
}

View File

@ -1,11 +0,0 @@
{
"description": "",
"uiStateJSON": "{}",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[]}"
},
"savedSearchId": "Apache-HTTPD",
"visState": "{\"title\":\"Apache HTTPD - Load1/5/15\",\"type\":\"line\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"showCircles\":true,\"smoothLines\":false,\"interpolate\":\"linear\",\"scale\":\"linear\",\"drawLinesBetweenPoints\":true,\"radiusRatio\":9,\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"yAxis\":{}},\"aggs\":[{\"id\":\"1\",\"type\":\"avg\",\"schema\":\"metric\",\"params\":{\"field\":\"apache.status.load.5\",\"customLabel\":\"Load 5\"}},{\"id\":\"2\",\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"type\":\"avg\",\"schema\":\"metric\",\"params\":{\"field\":\"apache.status.load.1\",\"customLabel\":\"Load 1\"}},{\"id\":\"4\",\"type\":\"avg\",\"schema\":\"metric\",\"params\":{\"field\":\"apache.status.load.15\",\"customLabel\":\"Load 15\"}},{\"id\":\"5\",\"type\":\"terms\",\"schema\":\"split\",\"params\":{\"field\":\"apache.status.hostname\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Hostname\",\"row\":true}}],\"listeners\":{}}",
"title": "Apache HTTPD - Load1/5/15",
"version": 1
}

View File

@ -1,11 +0,0 @@
{
"description": "",
"uiStateJSON": "{}",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[]}"
},
"savedSearchId": "Apache-HTTPD",
"visState": "{\"title\":\"Apache HTTPD - Scoreboard\",\"type\":\"line\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"showCircles\":true,\"smoothLines\":false,\"interpolate\":\"linear\",\"scale\":\"linear\",\"drawLinesBetweenPoints\":true,\"radiusRatio\":9,\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"yAxis\":{}},\"aggs\":[{\"id\":\"1\",\"type\":\"avg\",\"schema\":\"metric\",\"params\":{\"field\":\"apache.status.scoreboard.closing_connection\",\"customLabel\":\"Closing connection\"}},{\"id\":\"2\",\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"type\":\"terms\",\"schema\":\"split\",\"params\":{\"field\":\"apache.status.hostname\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Hostname\",\"row\":true}},{\"id\":\"4\",\"type\":\"avg\",\"schema\":\"metric\",\"params\":{\"field\":\"apache.status.scoreboard.dns_lookup\",\"customLabel\":\"DNS lookup\"}},{\"id\":\"5\",\"type\":\"avg\",\"schema\":\"metric\",\"params\":{\"field\":\"apache.status.scoreboard.gracefully_finishing\",\"customLabel\":\"Gracefully finishing\"}},{\"id\":\"6\",\"type\":\"avg\",\"schema\":\"metric\",\"params\":{\"field\":\"apache.status.scoreboard.idle_cleanup\",\"customLabel\":\"Idle cleanup\"}},{\"id\":\"7\",\"type\":\"avg\",\"schema\":\"metric\",\"params\":{\"field\":\"apache.status.scoreboard.keepalive\",\"customLabel\":\"Keepalive\"}},{\"id\":\"8\",\"type\":\"avg\",\"schema\":\"metric\",\"params\":{\"field\":\"apache.status.scoreboard.logging\",\"customLabel\":\"Logging\"}},{\"id\":\"9\",\"type\":\"avg\",\"schema\":\"metric\",\"params\":{\"field\":\"apache.status.scoreboard.open_slot\",\"customLabel\":\"Open slot\"}},{\"id\":\"10\",\"type\":\"avg\",\"schema\":\"metric\",\"params\":{\"field\":\"apache.status.scoreboard.reading_request\",\"customLabel\":\"Reading request\"}},{\"id\":\"11\",\"type\":\"avg\",\"schema\":\"metric\",\"params\":{\"field\":\"apache.status.scoreboard.sending_reply\",\"customLabel\":\"Sending reply\"}},{\"id\":\"12\",\"type\":\"avg\",\"schema\":\"metric\",\"params\":{\"field\":\"apache.status.scoreboard.starting_up\",\"customLabel\":\"Starting up\"}},{\"id\":\"13\",\"type\":\"avg\",\"schema\":\"metric\",\"params\":{\"field\":\"apache.status.scoreboard.total\",\"customLabel\":\"Total\"}},{\"id\":\"14\",\"type\":\"avg\",\"schema\":\"metric\",\"params\":{\"field\":\"apache.status.scoreboard.waiting_for_connection\",\"customLabel\":\"Waiting for connection\"}}],\"listeners\":{}}",
"title": "Apache HTTPD - Scoreboard",
"version": 1
}

Some files were not shown because too many files have changed in this diff.