diff --git a/Makefile b/Makefile index 41622ba5..7b19992a 100644 --- a/Makefile +++ b/Makefile @@ -1,13 +1,17 @@ BEATNAME=icingabeat BEAT_DIR=github.com/icinga/icingabeat BEAT_DESCRIPTION=Icingabeat ships Icinga 2 events and states to Elasticsearch or Logstash. +BEAT_VENDOR=Icinga +BEAT_DOC_URL=https://github.com/Icinga/icingabeat SYSTEM_TESTS=false TEST_ENVIRONMENT=false ES_BEATS?=./vendor/github.com/elastic/beats GOPACKAGES=$(shell glide novendor) PREFIX?=. -SNAPSHOT=false +#TARGETS="linux/amd64 linux/386 windows/amd64 windows/386 darwin/amd64" +#PACKAGES=${BEATNAME}/deb ${BEATNAME}/rpm ${BEATNAME}/darwin ${BEATNAME}/win ${BEATNAME}/bin +#SNAPSHOT=false # Path to the libbeat Makefile -include $(ES_BEATS)/libbeat/scripts/Makefile diff --git a/vendor/github.com/elastic/beats/.appveyor.yml b/vendor/github.com/elastic/beats/.appveyor.yml index 257f5bcb..04e6b737 100644 --- a/vendor/github.com/elastic/beats/.appveyor.yml +++ b/vendor/github.com/elastic/beats/.appveyor.yml @@ -6,7 +6,7 @@ os: Windows Server 2012 R2 # Environment variables environment: - GOROOT: c:\go1.7.1 + GOROOT: c:\go1.7.4 GOPATH: c:\gopath PYWIN_DL: https://beats-files.s3.amazonaws.com/deps/pywin32-220.win32-py2.7.exe matrix: @@ -24,13 +24,13 @@ clone_folder: c:\gopath\src\github.com\elastic\beats cache: - C:\ProgramData\chocolatey\bin -> .appveyor.yml - C:\ProgramData\chocolatey\lib -> .appveyor.yml -- C:\go1.7.1 -> .appveyor.yml +- C:\go1.7.4 -> .appveyor.yml - C:\tools\mingw64 -> .appveyor.yml - C:\pywin_inst.exe -> .appveyor.yml # Scripts that run after cloning repository install: - - ps: c:\gopath\src\github.com\elastic\beats\libbeat\scripts\install-go.ps1 -version 1.7.1 + - ps: c:\gopath\src\github.com\elastic\beats\libbeat\scripts\install-go.ps1 -version 1.7.4 - set PATH=%GOROOT%\bin;%PATH% # AppVeyor installed mingw is 32-bit only. - ps: >- @@ -51,7 +51,7 @@ install: - set PYTHONPATH=C:\Python27 - set PATH=%PYTHONPATH%;%PYTHONPATH%\Scripts;%PATH% - python --version - - pip install jinja2 nose nose-timer PyYAML redis + - pip install jinja2 nose nose-timer PyYAML redis elasticsearch - easy_install C:/pywin_inst.exe # To run your custom scripts instead of automatic MSBuild diff --git a/vendor/github.com/elastic/beats/.travis.yml b/vendor/github.com/elastic/beats/.travis.yml index b7b02730..2fe10844 100644 --- a/vendor/github.com/elastic/beats/.travis.yml +++ b/vendor/github.com/elastic/beats/.travis.yml @@ -5,63 +5,80 @@ services: language: go -go: - - 1.7.1 - # Make sure project can also be built on travis for clones of the repo go_import_path: github.com/elastic/beats -os: - - linux - - osx - env: - matrix: - - TARGETS="check" - - TARGETS="-C filebeat testsuite" - - TARGETS="-C heartbeat testsuite" - - TARGETS="-C libbeat testsuite" - - TARGETS="-C metricbeat testsuite" - - TARGETS="-C packetbeat testsuite" - - TARGETS="-C libbeat crosscompile" - - TARGETS="-C metricbeat crosscompile" - - TARGETS="-C winlogbeat crosscompile" - - TARGETS="-C libbeat/dashboards" - - TARGETS="-C generate/metricbeat/metricset test" - - TARGETS="-C generate/beat test" global: # Cross-compile for amd64 only to speed up testing. 
- GOX_FLAGS="-arch amd64" - - DOCKER_COMPOSE_VERSION: 1.8.1 + - DOCKER_COMPOSE_VERSION: 1.9.0 + - &go_version 1.7.4 matrix: - exclude: - - os: osx + include: + # General checks + - os: linux env: TARGETS="check" + go: *go_version + + # Filebeat + - os: linux + env: TARGETS="-C filebeat testsuite" + go: *go_version - os: osx - env: TARGETS="-C filebeat crosscompile" - - os: osx - env: TARGETS="-C libbeat crosscompile" - - os: osx - env: TARGETS="-C metricbeat crosscompile" - - os: osx - env: TARGETS="-C winlogbeat crosscompile" + env: TARGETS="TEST_ENVIRONMENT=0 -C filebeat testsuite" + go: *go_version + + # Heartbeat + - os: linux + env: TARGETS="-C heartbeat testsuite" + go: *go_version - os: osx + env: TARGETS="TEST_ENVIRONMENT=0 -C heartbeat testsuite" + go: *go_version + + # Libbeat + - os: linux env: TARGETS="-C libbeat testsuite" - - os: osx - env: TARGETS="-C hearbeat testsuite" - - os: osx + go: *go_version + - os: linux + env: TARGETS="-C libbeat crosscompile" + go: *go_version + + # Metricbeat + - os: linux env: TARGETS="-C metricbeat testsuite" + go: *go_version - os: osx + env: TARGETS="TEST_ENVIRONMENT=0 -C metricbeat testsuite" + go: *go_version + - os: linux + env: TARGETS="-C metricbeat crosscompile" + go: *go_version + + # Packetbeat + - os: linux + env: TARGETS="-C packetbeat testsuite" + go: *go_version + + # Winlogbeat + - os: linux + env: TARGETS="-C winlogbeat crosscompile" + go: *go_version + + # Dashboards + - os: linux env: TARGETS="-C libbeat/dashboards" - - os: osx + go: *go_version + + # Generators + - os: linux env: TARGETS="-C generate/metricbeat/metricset test" - - os: osx + go: *go_version + - os: linux env: TARGETS="-C generate/beat test" - fast_finish: true - allow_failures: - - env: TARGETS="-C libbeat crosscompile" - - env: TARGETS="-C filebeat crosscompile" + go: *go_version addons: apt: @@ -71,20 +88,14 @@ addons: - geoip-database before_install: - # Update to most recent docker version - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then - sudo apt-get update; - sudo apt-cache search docker; - sudo apt-get -o Dpkg::Options::="--force-confnew" install -y docker-engine; - fi # Docker-compose installation - sudo rm /usr/local/bin/docker-compose || true - curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose - chmod +x docker-compose - sudo mv docker-compose /usr/local/bin -install: - - true +# Skips installations step +install: true script: - make $TARGETS @@ -97,6 +108,7 @@ notifications: after_success: # Copy full.cov to coverage.txt because codecov.io requires this file - test -f filebeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f filebeat/build/coverage/full.cov - - test -f packetbeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f packetbeat/build/coverage/full.cov + - test -f heartbeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f heartbeat/build/coverage/full.cov - test -f libbeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f libbeat/build/coverage/full.cov - test -f metricbeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f metricbeat/build/coverage/full.cov + - test -f packetbeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f packetbeat/build/coverage/full.cov diff --git a/vendor/github.com/elastic/beats/CHANGELOG.asciidoc b/vendor/github.com/elastic/beats/CHANGELOG.asciidoc index 8b43e374..4f755970 100644 --- 
a/vendor/github.com/elastic/beats/CHANGELOG.asciidoc +++ b/vendor/github.com/elastic/beats/CHANGELOG.asciidoc @@ -8,31 +8,37 @@ // Template, add newest changes here === Beats version HEAD -https://github.com/elastic/beats/compare/v5.1.2...5.1[Check the HEAD diff] +https://github.com/elastic/beats/compare/v5.2.0...master[Check the HEAD diff] ==== Breaking changes *Affecting all Beats* +*Filebeat* + +*Heartbeat* + *Metricbeat* *Packetbeat* -*Filebeat* - *Winlogbeat* - ==== Bugfixes *Affecting all Beats* +*Filebeat* + +*Heartbeat* + *Metricbeat* -*Packetbeat* +- Fix go routine leak in docker module. {pull}3492[3492] +- Fix bug with docker module hanging when a docker container is killed. {issue}3610[3610] +- Set timeout to period instead of 1s by default as documented. -*Filebeat* -- Fix registry migration issue from old states were files were only harvested after second restart. {pull}3322[3322] +*Packetbeat* *Winlogbeat* @@ -40,34 +46,118 @@ https://github.com/elastic/beats/compare/v5.1.2...5.1[Check the HEAD diff] *Affecting all Beats* +*Heartbeat* + *Metricbeat* -*Packetbeat* - *Filebeat* +*Packetbeat* + *Winlogbeat* ==== Deprecated *Affecting all Beats* +*Filebeat* + +*Heartbeat* + *Metricbeat* *Packetbeat* -*Filebeat* - *Winlogbeat* //////////////////////////////////////////////////////////// +[[release-notes-5.2.1]] +=== Beats version 5.2.1 +https://github.com/elastic/beats/compare/v5.2.0...v5.2.1[View commits] + +==== Bugfixes + +*Metricbeat* + +- Fix go routine leak in docker module. {pull}3492[3492] + +*Packetbeat* + +- Fix error in the NFS sample dashboard. {pull}3548[3548] + +*Winlogbeat* + +- Fix error in the Winlogbeat sample dashboard. {pull}3548[3548] + + +[[release-notes-5.2.0]] +=== Beats version 5.2.0 +https://github.com/elastic/beats/compare/v5.1.2...v5.2.0[View commits] + +==== Bugfixes + +*Affecting all Beats* + +- Fix overwriting explicit empty config sections. {issue}2918[2918] + +*Filebeat* + +- Fix alignment issue where Filebeat compiled with Go 1.7.4 was crashing on 32-bit systems. {issue}3273[3273] + +*Metricbeat* + +- Fix service times-out at startup. {pull}3056[3056] +- Kafka module case sensitive host name matching. {pull}3193[3193] +- Fix interface conversion panic in couchbase module {pull}3272[3272] + +*Packetbeat* + +- Fix issue where some Cassandra visualizations were showing data from all protocols. {issue}3314[3314] + +==== Added + +*Affecting all Beats* + +- Add support for passing list and dictionary settings via -E flag. +- Support for parsing list and dictionary settings from environment variables. +- Added new flags to import_dashboards (-cacert, -cert, -key, -insecure). {pull}3139[3139] {pull}3163[3163] +- The limit for the number of fields is increased via the mapping template. {pull}3275[3275] +- Updated to Go 1.7.4. {pull}3277[3277] +- Added a NOTICE file containing the notices and licenses of the dependencies. {pull}3334[3334]. + +*Heartbeat* + +- First release, containing monitors for ICMP, TCP, and HTTP. + +*Filebeat* + +- Add enabled config option to prospectors. {pull}3157[3157] +- Add target option for decoded_json_field. {pull}3169[3169] + +*Metricbeat* + +- Kafka module broker matching enhancements. {pull}3129[3129] +- Add a couchbase module with metricsets for node, cluster and bucket. {pull}3081[3081] +- Export number of cores for CPU module. {pull}3192[3192] +- Experimental Prometheus module. {pull}3202[3202] +- Add system socket module that reports all TCP sockets. {pull}3246[3246] +- Kafka consumer groups metricset.
{pull}3240[3240] + +*Winlogbeat* + +- Reduced amount of memory allocated while reading event log records. {pull}3113[3113] {pull}3118[3118] + [[release-notes-5.1.2]] === Beats version 5.1.2 https://github.com/elastic/beats/compare/v5.1.1...v5.1.2[View commits] ==== Bugfixes +*Filebeat* + +- Fix registry migration issue from old states where files were only harvested after second restart. {pull}3322[3322] + *Packetbeat* - Fix error on importing dashboards due to colons in the Cassandra dashboard. {issue}3140[3140] @@ -77,6 +167,7 @@ https://github.com/elastic/beats/compare/v5.1.1...v5.1.2[View commits] - Fix for "The array bounds are invalid" error when reading large events. {issue}3076[3076] + [[release-notes-5.1.1]] === Beats version 5.1.1 https://github.com/elastic/beats/compare/v5.0.2...v5.1.1[View commits] @@ -118,6 +209,7 @@ https://github.com/elastic/beats/compare/v5.0.2...v5.1.1[View commits] - Add support for MongoDB 3.4 and WiredTiger metrics. {pull}2999[2999] - Add experimental kafka module with partition metricset. {pull}2969[2969] - Add raw config option for mysql/status metricset. {pull}3001[3001] +- Add command fields for mysql/status metricset. {pull}3251[3251] *Filebeat* @@ -177,7 +269,7 @@ https://github.com/elastic/beats/compare/v5.0.0...v5.0.1[View commits] *Metricbeat* -- Add username and password config options to the PostgreSQL module. {pull}2890[2890] +- Add username and password config options to the PostgreSQL module. {pull}2889[2890] - Add username and password config options to the MongoDB module. {pull}2889[2889] - Add system core metricset for Windows. {pull}2883[2883] diff --git a/vendor/github.com/elastic/beats/CONTRIBUTING.md b/vendor/github.com/elastic/beats/CONTRIBUTING.md index e8e12983..270b2448 100644 --- a/vendor/github.com/elastic/beats/CONTRIBUTING.md +++ b/vendor/github.com/elastic/beats/CONTRIBUTING.md @@ -51,7 +51,7 @@ Beats](https://github.com/elastic/beats/blob/master/libbeat/docs/communitybeats. The Beats are Go programs, so install the latest version of [golang](http://golang.org/) if you don't have it already. The current Go version -used for development is Golang 1.7.1. +used for development is Golang 1.7.4. The location where you clone is important. Please clone under the source directory of your `GOPATH`. If you don't have `GOPATH` already set, you can diff --git a/vendor/github.com/elastic/beats/Dockerfile b/vendor/github.com/elastic/beats/Dockerfile index 26e57c3e..22d24f42 100644 --- a/vendor/github.com/elastic/beats/Dockerfile +++ b/vendor/github.com/elastic/beats/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.7.1 +FROM golang:1.7.4 MAINTAINER Nicolas Ruflin RUN set -x && \ @@ -11,5 +11,5 @@ COPY libbeat/scripts/docker-entrypoint.sh /entrypoint.sh RUN mkdir -p /etc/pki/tls/certs COPY testing/environments/docker/logstash/pki/tls/certs/logstash.crt /etc/pki/tls/certs/logstash.crt -# Create a copy of the respository inside the container. +# Create a copy of the repository inside the container. COPY .
/go/src/github.com/elastic/beats/ diff --git a/vendor/github.com/elastic/beats/Makefile b/vendor/github.com/elastic/beats/Makefile index ebbebef5..e4194460 100644 --- a/vendor/github.com/elastic/beats/Makefile +++ b/vendor/github.com/elastic/beats/Makefile @@ -3,7 +3,7 @@ BUILD_DIR=build COVERAGE_DIR=${BUILD_DIR}/coverage BEATS=packetbeat filebeat winlogbeat metricbeat heartbeat PROJECTS=libbeat ${BEATS} -PROJECTS_ENV=libbeat metricbeat +PROJECTS_ENV=libbeat filebeat metricbeat SNAPSHOT?=yes # Runs complete testsuites (unit, system, integration) for all beats with coverage and race detection. @@ -113,3 +113,9 @@ upload-package: .PHONY: release-upload upload-release: aws s3 cp --recursive --acl public-read build/upload s3://download.elasticsearch.org/beats/ + +.PHONY: notice +notice: + python dev-tools/generate_notice.py . + + diff --git a/vendor/github.com/elastic/beats/NOTICE b/vendor/github.com/elastic/beats/NOTICE new file mode 100644 index 00000000..6f7de24c --- /dev/null +++ b/vendor/github.com/elastic/beats/NOTICE @@ -0,0 +1,1483 @@ +Elastic Beats +Copyright 2014-2017 Elasticsearch BV + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +========================================================================== +Third party libraries used by the Beats project: +========================================================================== + + +-------------------------------------------------------------------- +github.com/andrewkroh/sys +-------------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------- +github.com/armon/go-socks5 +-------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2014 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------- +github.com/davecgh/go-spew +-------------------------------------------------------------------- +Copyright (c) 2012-2013 Dave Collins + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-------------------------------------------------------------------- +github.com/dustin/go-humanize +-------------------------------------------------------------------- +Copyright (c) 2005-2008 Dustin Sallings + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + + +-------------------------------------------------------------------- +github.com/eapache/go-resiliency +-------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2014 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------- +github.com/eapache/go-xerial-snappy +-------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2016 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------- +github.com/eapache/queue +-------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2014 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +-------------------------------------------------------------------- +github.com/elastic/go-lumber +-------------------------------------------------------------------- +Apache License + + +-------------------------------------------------------------------- +github.com/elastic/go-ucfg +-------------------------------------------------------------------- +Apache License + + +-------------------------------------------------------------------- +github.com/elastic/gosigar +-------------------------------------------------------------------- +Apache License + +-------NOTICE----- +Copyright (c) [2009-2011] VMware, Inc. All Rights Reserved. + +This product is licensed to you under the Apache License, Version 2.0 (the "License"). +You may not use this product except in compliance with the License. + +This product includes a number of subcomponents with +separate copyright notices and license terms. Your use of these +subcomponents is subject to the terms and conditions of the +subcomponent's license, as noted in the LICENSE file. +-------------------------------------------------------------------- +github.com/elastic/procfs +-------------------------------------------------------------------- +Apache License + +-------NOTICE----- +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + +-------------------------------------------------------------------- +github.com/garyburd/redigo +-------------------------------------------------------------------- +Apache License + + +-------------------------------------------------------------------- +github.com/gocarina/gocsv +-------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2014 Jonathan Picques + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+-------------------------------------------------------------------- +github.com/golang/snappy +-------------------------------------------------------------------- +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------- +github.com/joeshaw/multierror +-------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2014 Joe Shaw + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +-------------------------------------------------------------------- +github.com/klauspost/compress +-------------------------------------------------------------------- +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------- +github.com/klauspost/cpuid +-------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------- +github.com/klauspost/crc32 +-------------------------------------------------------------------- +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2015 Klaus Post + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------- +github.com/miekg/dns +-------------------------------------------------------------------- +Extensions of the original work are copyright (c) 2011 Miek Gieben + +As this is fork of the official Go code the same license applies: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------- +github.com/mitchellh/mapstructure +-------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +-------------------------------------------------------------------- +github.com/nranchev/go-libGeoIP +-------------------------------------------------------------------- +Copyright (c) 2010, Nikola Ranchev +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------- +github.com/pierrec/lz4 +-------------------------------------------------------------------- +Copyright (c) 2015, Pierre Curto +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of xxHash nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------- +github.com/pierrec/xxHash +-------------------------------------------------------------------- +Copyright (c) 2014, Pierre Curto +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of xxHash nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------- +github.com/pierrre/gotestcover +-------------------------------------------------------------------- +Copyright (C) 2015 Pierre Durand + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+-------------------------------------------------------------------- +github.com/pkg/errors +-------------------------------------------------------------------- +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------- +github.com/pmezard/go-difflib +-------------------------------------------------------------------- +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------- +github.com/rcrowley/go-metrics +-------------------------------------------------------------------- +Copyright 2012 Richard Crowley. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. 
Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation +are those of the authors and should not be interpreted as representing +official policies, either expressed or implied, of Richard Crowley. + +-------------------------------------------------------------------- +github.com/samuel/go-thrift +-------------------------------------------------------------------- +Copyright (c) 2012, Samuel Stauffer +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +* Neither the name of the author nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------- +github.com/satori/go.uuid +-------------------------------------------------------------------- +Copyright (C) 2013-2016 by Maxim Bublis + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------- +github.com/shirou/gopsutil +-------------------------------------------------------------------- +gopsutil is distributed under BSD license reproduced below. + +Copyright (c) 2014, WAKAYAMA Shirou +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the gopsutil authors nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------- +github.com/StackExchange/wmi +-------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2013 Stack Exchange + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------- +github.com/stretchr/objx +-------------------------------------------------------------------- +objx - by Mat Ryer and Tyler Bunnell + +The MIT License (MIT) + +Copyright (c) 2014 Stretchr, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------- +github.com/stretchr/testify +-------------------------------------------------------------------- +Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell + +Please consider promoting this project if you find it useful. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------- +github.com/tsg/gopacket +-------------------------------------------------------------------- +Copyright (c) 2012 Google, Inc. All rights reserved. +Copyright (c) 2009-2011 Andreas Krennmair. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Andreas Krennmair, Google, nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------- +golang.org/x/net +-------------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------- +golang.org/x/sys +-------------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------- +golang.org/x/text +-------------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------- +gopkg.in/mgo.v2/bson +-------------------------------------------------------------------- +BSON library for Go + +Copyright (c) 2010-2012 - Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------- +github.com/mitchellh/hashstructure +-------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +-------------------------------------------------------------------- +github.com/fsouza/go-dockerclient +-------------------------------------------------------------------- +Copyright (c) 2016, go-dockerclient authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------- +github.com/go-sql-driver/mysql +-------------------------------------------------------------------- +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. 
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. 
+ +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. 
Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. + +-------------------------------------------------------------------- +github.com/lib/pq +-------------------------------------------------------------------- +Copyright (c) 2011-2013, 'pq' Contributors +Portions Copyright (C) 2011 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------- +github.com/davecgh/go-spew +-------------------------------------------------------------------- +Copyright (c) 2012-2013 Dave Collins + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-------------------------------------------------------------------- +github.com/pmezard/go-difflib +-------------------------------------------------------------------- +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. 
+ The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------- +github.com/stretchr/objx +-------------------------------------------------------------------- +objx - by Mat Ryer and Tyler Bunnell + +The MIT License (MIT) + +Copyright (c) 2014 Stretchr, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/elastic/beats/dev-tools/common.bash b/vendor/github.com/elastic/beats/dev-tools/common.bash new file mode 100644 index 00000000..64bf8cf4 --- /dev/null +++ b/vendor/github.com/elastic/beats/dev-tools/common.bash @@ -0,0 +1,80 @@ +# +# File: common.bash +# +# Common bash routines. +# + +# Script directory: +_sdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +# debug "msg" +# Write a debug message to stderr. +debug() +{ + if [ "$VERBOSE" == "true" ]; then + echo "DEBUG: $1" >&2 + fi +} + +# err "msg" +# Write and error message to stderr. +err() +{ + echo "ERROR: $1" >&2 +} + +# get_go_version +# Read the project's Go version and return it in the GO_VERSION variable. +# On failure it will exit. +get_go_version() { + GO_VERSION=$(awk '/^:go-version:/{print $NF}' "${_sdir}/../libbeat/docs/version.asciidoc") + if [ -z "$GO_VERSION" ]; then + err "Failed to detect the project's Go version" + exit 1 + fi +} + +# install_gimme +# Install gimme to HOME/bin. +install_gimme() { + # Install gimme + if [ ! 
-f "${HOME}/bin/gimme" ]; then + mkdir -p ${HOME}/bin + curl -sL -o ${HOME}/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/v1.1.0/gimme + chmod +x ${HOME}/bin/gimme + fi + + GIMME="${HOME}/bin/gimme" + debug "Gimme version $(${GIMME} version)" +} + +# setup_go_root "version" +# This configures the Go version being used. It sets GOROOT and adds +# GOROOT/bin to the PATH. It uses gimme to download the Go version if +# it does not already exist in the ~/.gimme dir. +setup_go_root() { + local version=${1} + + install_gimme + + # Setup GOROOT and add go to the PATH. + ${GIMME} "${version}" > /dev/null + source "${HOME}/.gimme/envs/go${version}.env" 2> /dev/null + + debug "$(go version)" +} + +# setup_go_path "gopath" +# This sets GOPATH and adds GOPATH/bin to the PATH. +setup_go_path() { + local gopath="${1}" + if [ -z "$gopath" ]; then return; fi + + # Setup GOPATH. + export GOPATH="${gopath}" + + # Add GOPATH to PATH. + export PATH="${GOPATH}/bin:${PATH}" + + debug "GOPATH=${GOPATH}" +} diff --git a/vendor/github.com/elastic/beats/dev-tools/generate_notice.py b/vendor/github.com/elastic/beats/dev-tools/generate_notice.py new file mode 100644 index 00000000..bb309806 --- /dev/null +++ b/vendor/github.com/elastic/beats/dev-tools/generate_notice.py @@ -0,0 +1,98 @@ +import glob +import sys +import os +import datetime +import argparse + + +def read_file(filename): + + if not os.path.isfile(filename): + print("File not found {}".format(filename)) + return "" + + with open(filename, 'r') as f: + file_content = f.read() + return file_content + + +def get_library_name(license): + + lib = "" + path = os.path.dirname(license) + # get the last three directories + for i in range(0, 3): + path, x = os.path.split(path) + if len(lib) == 0: + lib = x + elif len(x) > 0: + lib = x + "/" + lib + + return lib + + +def add_licenses(f, licenses): + + for license in licenses: + for license_file in glob.glob(license): + f.write("\n--------------------------------------------------------------------\n") + f.write("{}\n".format(get_library_name(license_file))) + f.write("--------------------------------------------------------------------\n") + copyright = read_file(license_file) + if "Apache License" not in copyright: + f.write(copyright) + else: + # it's an Apache License, so include only the NOTICE file + f.write("Apache License\n\n") + for notice_file in glob.glob(os.path.join(os.path.dirname(license_file), "NOTICE*")): + f.write("-------{}-----\n".format(os.path.basename(notice_file))) + f.write(read_file(notice_file)) + + + +def create_notice(filename, beat, copyright, licenses): + + now = datetime.datetime.now() + + with open(filename, "w+") as f: + + # Add header + f.write("{}\n".format(beat)) + f.write("Copyright 2014-{0} {1}\n".format(now.year, copyright)) + f.write("\n") + f.write("This product includes software developed by The Apache Software \nFoundation (http://www.apache.org/).\n\n") + + # Add licenses for 3rd party libraries + f.write("==========================================================================\n") + f.write("Third party libraries used by the Beats project:\n") + f.write("==========================================================================\n\n") + add_licenses(f, licenses) + + +if __name__ == "__main__": + + parser = argparse.ArgumentParser( + description="Generate the NOTICE file from all vendor directories available in a given directory") + parser.add_argument("vendor", + help="directory where to search for vendor directories") + parser.add_argument("-b", "--beat", 
default="Elastic Beats", + help="Beat name") + parser.add_argument("-c", "--copyright", default="Elasticsearch BV", + help="copyright owner") + + args = parser.parse_args() + + cwd = os.getcwd() + notice = os.path.join(cwd, "NOTICE") + licenses = [] + + for root, dirs, files in os.walk(args.vendor): + if 'vendor' in dirs: + license = os.path.join(os.path.join(root, 'vendor'), + '**/**/**/LICENSE*') + licenses.append(license) + + print("Get the licenses available from {}".format(licenses)) + create_notice(notice, args.beat, args.copyright, licenses) + + print("Available at {}\n".format(notice)) diff --git a/vendor/github.com/elastic/beats/dev-tools/get_version b/vendor/github.com/elastic/beats/dev-tools/get_version index 26162f9b..500cc168 100755 --- a/vendor/github.com/elastic/beats/dev-tools/get_version +++ b/vendor/github.com/elastic/beats/dev-tools/get_version @@ -1,20 +1,37 @@ #!/usr/bin/env python import os +import re import argparse -pattern = '''const defaultBeatVersion = "''' +pattern = re.compile(r'(const\s|)\w*(v|V)ersion\s=\s"(?P.*)"') +vendored_libbeat = os.path.normpath("vendor/github.com/elastic/beats") + + +def get_filepath(filename): + script_directory = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + index = script_directory.find(vendored_libbeat) + if index > 0: + # Community beat detected + filename = os.path.join(script_directory[:index], filename) + if os.path.exists(filename): + return filename # Community beat version exists + return os.path.abspath(os.path.join(script_directory, os.pardir, "libbeat","beat","version.go")) def main(): parser = argparse.ArgumentParser( description="Prints the current version at stdout.") - parser.parse_args() + args = parser.parse_args() - dir = os.path.dirname(os.path.realpath(__file__)) - with open(dir + "/../libbeat/beat/version.go", "r") as f: + goversion_filepath = get_filepath("version.go") + + with open(goversion_filepath, "r") as f: for line in f: - if line.startswith(pattern): - print(line[len(pattern):-2]) # -2 for \n and the final quote + match = pattern.match(line) + if match: + print(match.group('version')) + return + print ("No version found in file {}".format(goversion_filepath)) if __name__ == "__main__": main() diff --git a/vendor/github.com/elastic/beats/dev-tools/jenkins_ci b/vendor/github.com/elastic/beats/dev-tools/jenkins_ci new file mode 100755 index 00000000..f25a368a --- /dev/null +++ b/vendor/github.com/elastic/beats/dev-tools/jenkins_ci @@ -0,0 +1,143 @@ +#!/usr/bin/env bash +set -e + +# Script directory: +SDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +SNAME="$(basename "$0")" + +source "${SDIR}/common.bash" + +usage() { +cat << EOF + Usage: $SNAME [-d] [-h] [-v] [-r] [-w=WORKSPACE] (-g|-b|-c) + + Description: Executes a build using the project's Go version. + + Options: + -w=WORKSPACE Required. Specifies the path to the Jenkins workspace. + If not set then the WORKSPACE environment variable is + used. The workspace will be treated as the GOPATH. + -b | --build Perform a build which includes make targets: check, + testsuite, coverage-report, and docs. + -c | --cleanup Clean up after the build by removing the checkout of + elastic/docs and stopping any running containers + started by the build. This cannot be specified with + --build. + -g | --go-version Optional. Write the project's Go version to stdout + and then exits. Can be used to setup Go with + eval "\$(gimme \$(./jenkins_ci -g))". + -i | --install-gimme Optional. Installs gimme to HOME/bin. + -r | --race Optional. 
Enable the Go race detector for tests that + are run. + -d | --debug Optional. Runs the script with 'set -x' to log a trace + of all commands and their arguments being executed. + -v | --verbose Optional. Enable verbose logging from this script to stderr. + -h | --help Optional. Print this usage information. + + Examples: + Print project Go version: ./$SNAME --go-version + Build with race detector: ./$SNAME -b -r + Stop test environment: ./$SNAME -c + + Jenkins Setup: + + 1) Jenkins should be setup to checkout elastic/beats into + \$WORKSPACE/src/github.com/elastic/ + 2) The single build script should be added that executes + \$WORKSPACE/src/github.com/elastic/beats/dev-tools/$SNAME -d -v -b --race + 3) A post build action should be added that executes + \$WORKSPACE/src/github.com/elastic/beats/dev-tools/$SNAME -d -v -c +EOF +} + +# Parse command line arguments. +parse_args() { + for i in "$@" + do + case $i in + -b|--build) + BUILD=true + shift + ;; + -c|--cleanup) + CLEANUP=true + shift + ;; + -d|--debug) + set -x + shift + ;; + -g|--go-version) + get_go_version + echo "${GO_VERSION}" + exit 0 + ;; + -h|--help) + usage + exit 1 + ;; + -i|--install-gimme) + install_gimme + exit 0 + ;; + -r|--race) + export RACE_DETECTOR=1 + shift + ;; + -v|--verbose) + VERBOSE=true + shift + ;; + -w=*|--workspace=*) + WORKSPACE="${i#*=}" + shift + ;; + *) + echo "Invalid argument: $i" + usage + exit 1 + ;; + esac + done + + if [ -z "$WORKSPACE" ]; then + err "WORKSPACE env var must be set or --workspace must be specified" + exit 1 + fi +} + +build() { + make check + make testsuite + make coverage-report + make docs +} + +cleanup() { + # Remove the checkout of elastic/docs if it exists. + rm -rf "${SDIR}/../build/docs" + + make stop-environments +} + +main() { + cd "${SDIR}/.." 
+ parse_args $* + get_go_version + setup_go_root ${GO_VERSION} + setup_go_path ${WORKSPACE} + + if [ "$BUILD" == "true" ] && [ "$CLEANUP" == "true" ]; then + err "--build and --cleanup cannot be used together" + exit 1 + elif [ "$BUILD" == "true" ]; then + build + elif [ "$CLEANUP" == "true" ]; then + cleanup + else + err "Use either --build or --cleanup" + exit 1 + fi +} + +main $* diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/Makefile b/vendor/github.com/elastic/beats/dev-tools/packer/Makefile index 4065d51f..ee09bb9a 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/Makefile +++ b/vendor/github.com/elastic/beats/dev-tools/packer/Makefile @@ -1,6 +1,9 @@ BUILDID?=$(shell git rev-parse HEAD) SNAPSHOT?=yes +BEATS_BUILDER_IMAGE?=tudorg/beats-builder +BEATS_BUILDER_DEB6_IMAGE?=tudorg/beats-builder-deb6 + makefile_abspath:=$(abspath $(lastword $(MAKEFILE_LIST))) packer_absdir=$(shell dirname ${makefile_abspath}) beat_abspath=${GOPATH}/src/${BEAT_DIR} @@ -73,29 +76,29 @@ run-interactive-builder-deb6: docker run -t -i -v $(shell pwd)/build:/build \ -v $(shell pwd)/xgo-scripts/:/scripts \ -v $(shell pwd)/../..:/source \ - --entrypoint=bash tudorg/beats-builder-deb6 + --entrypoint=bash ${BEATS_BUILDER_DEB6_IMAGE} .PHONY: run-interactive-builder run-interactive-builder: docker run -t -i -v $(shell pwd)/build:/build \ -v $(packer_absdir)/xgo-scripts/:/scripts \ -v $(shell pwd)/../..:/source \ - --entrypoint=bash tudorg/beats-builder + --entrypoint=bash ${BEATS_BUILDER_IMAGE} .PHONY: images images: xgo-image fpm-image go-daemon-image .PHONY: push-images push-images: - docker push tudorg/beats-builder - docker push tudorg/beats-builder-deb6 + docker push ${BEATS_BUILDER_IMAGE} + docker push ${BEATS_BUILDER_DEB6_IMAGE} docker push tudorg/fpm docker push tudorg/go-daemon .PHONY: pull-images pull-images: - docker pull tudorg/beats-builder - docker pull tudorg/beats-builder-deb6 + docker pull ${BEATS_BUILDER_IMAGE} + docker pull ${BEATS_BUILDER_DEB6_IMAGE} docker pull tudorg/fpm docker pull tudorg/go-daemon @@ -115,8 +118,8 @@ endef .PHONY: clean-images clean-images: - @$(call rm-image,tudorg/beats-builder-deb6) - @$(call rm-image,tudorg/beats-builder) + @$(call rm-image, ${BEATS_BUILDER_DEB6_IMAGE}) + @$(call rm-image, ${BEATS_BUILDER_IMAGE}) .PHONY: clean clean: diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/beats-builder/Dockerfile b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/beats-builder/Dockerfile index f258ae96..509f94ed 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/beats-builder/Dockerfile +++ b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/beats-builder/Dockerfile @@ -1,4 +1,4 @@ -FROM tudorg/xgo-deb6-1.7.1 +FROM tudorg/xgo-deb6-1.7.4 MAINTAINER Tudor Golubenco diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/build.sh b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/build.sh index 5d46a4b2..df68d8af 100755 --- a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/build.sh +++ b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/build.sh @@ -1,5 +1,5 @@ #!/bin/sh docker build --rm=true -t tudorg/xgo-deb6-base base/ && \ - docker build --rm=true -t tudorg/xgo-deb6-1.7.1 go-1.7.1/ && + docker build --rm=true -t tudorg/xgo-deb6-1.7.4 go-1.7.4/ && docker build --rm=true -t tudorg/beats-builder-deb6 beats-builder diff --git 
a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/go-1.7.4/Dockerfile b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/go-1.7.4/Dockerfile new file mode 100644 index 00000000..075478f5 --- /dev/null +++ b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/go-1.7.4/Dockerfile @@ -0,0 +1,15 @@ +# Go cross compiler (xgo): Go 1.7.4 layer +# Copyright (c) 2014 Péter Szilágyi. All rights reserved. +# +# Released under the MIT license. + +FROM tudorg/xgo-deb6-base + +MAINTAINER Tudor Golubenco + +# Configure the root Go distribution and bootstrap based on it +RUN \ + export ROOT_DIST="https://storage.googleapis.com/golang/go1.7.4.linux-amd64.tar.gz" && \ + export ROOT_DIST_SHA1="2e5baf03d1590e048c84d1d5b4b6f2540efaaea1" && \ + \ + $BOOTSTRAP_PURE diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/build.sh b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/build.sh index 7b6e9408..6dfd0b32 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/build.sh +++ b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/build.sh @@ -121,7 +121,6 @@ if [ "$TARGETS" == "" ]; then fi -built_targets=0 for TARGET in $TARGETS; do # Split the target into platform and architecture XGOOS=`echo $TARGET | cut -d '/' -f 1` @@ -134,23 +133,20 @@ for TARGET in $TARGETS; do export PKG_CONFIG_PATH=/usr/aarch64-linux-gnu/lib/pkgconfig GOOS=linux GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go get -d ./$PACK - sh -c "GOOS=linux GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build $V $R $LDARGS -o $NAME-linux-amd64$R ./$PACK" - built_targets=$((built_targets+1)) + sh -c "GOOS=linux GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build $V $R $LDARGS -o /build/$NAME-linux-amd64$R ./$PACK" fi if ([ $XGOOS == "." ] || [ $XGOOS == "linux" ]) && ([ $XGOARCH == "." ] || [ $XGOARCH == "386" ]); then echo "Compiling $PACK for linux/386..." CFLAGS=-m32 CXXFLAGS=-m32 LDFLAGS=-m32 HOST=i686-linux PREFIX=/usr/local $BUILD_DEPS /deps $LIST_DEPS GOOS=linux GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go get -d ./$PACK - sh -c "GOOS=linux GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go build $V $R $LDARGS -o $NAME-linux-386$R ./$PACK" - built_targets=$((built_targets+1)) + sh -c "GOOS=linux GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go build $V $R $LDARGS -o /build/$NAME-linux-386$R ./$PACK" fi if ([ $XGOOS == "." ] || [ $XGOOS == "linux" ]) && ([ $XGOARCH == "." ] || [ $XGOARCH == "arm" ]); then echo "Compiling $PACK for linux/arm..." 
CC=arm-linux-gnueabi-gcc CXX=rm-linux-gnueabi-g++ HOST=arm-linux PREFIX=/usr/local/arm $BUILD_DEPS /deps $LIST_DEPS CC=arm-linux-gnueabi-gcc CXX=rm-linux-gnueabi-g++ GOOS=linux GOARCH=arm CGO_ENABLED=${CGO_ENABLED} GOARM=5 go get -d ./$PACK - CC=arm-linux-gnueabi-gcc CXX=rm-linux-gnueabi-g++ GOOS=linux GOARCH=arm CGO_ENABLED=${CGO_ENABLED} GOARM=5 go build $V -o $NAME-linux-arm ./$PACK - built_targets=$((built_targets+1)) + CC=arm-linux-gnueabi-gcc CXX=rm-linux-gnueabi-g++ GOOS=linux GOARCH=arm CGO_ENABLED=${CGO_ENABLED} GOARM=5 go build $V -o /build/$NAME-linux-arm ./$PACK fi # Check and build for Windows targets @@ -174,8 +170,7 @@ for TARGET in $TARGETS; do export PKG_CONFIG_PATH=/usr/x86_64-w64-mingw32/lib/pkgconfig CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ GOOS=windows GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go get -d ./$PACK - CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ GOOS=windows GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go build $V $R -o $NAME-windows-amd64$R.exe ./$PACK - built_targets=$((built_targets+1)) + CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ GOOS=windows GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go build $V $R -o /build/$NAME-windows-amd64$R.exe ./$PACK fi if [ $XGOARCH == "." ] || [ $XGOARCH == "386" ]; then @@ -184,8 +179,7 @@ for TARGET in $TARGETS; do export PKG_CONFIG_PATH=/usr/i686-w64-mingw32/lib/pkgconfig CC=i686-w64-mingw32-gcc CXX=i686-w64-mingw32-g++ GOOS=windows GOARCH=386 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go get -d ./$PACK - CC=i686-w64-mingw32-gcc CXX=i686-w64-mingw32-g++ GOOS=windows GOARCH=386 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go build $V -o $NAME-windows-386.exe ./$PACK - built_targets=$((built_targets+1)) + CC=i686-w64-mingw32-gcc CXX=i686-w64-mingw32-g++ GOOS=windows GOARCH=386 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go build $V -o /build/$NAME-windows-386.exe ./$PACK fi fi @@ -194,22 +188,14 @@ for TARGET in $TARGETS; do echo "Compiling $PACK for darwin/amd64..." CC=o64-clang CXX=o64-clang++ HOST=x86_64-apple-darwin10 PREFIX=/usr/local $BUILD_DEPS /deps $LIST_DEPS CC=o64-clang CXX=o64-clang++ GOOS=darwin GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go get -d ./$PACK - CC=o64-clang CXX=o64-clang++ GOOS=darwin GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build -ldflags=-s $V $R -o $NAME-darwin-amd64$R ./$PACK - built_targets=$((built_targets+1)) + CC=o64-clang CXX=o64-clang++ GOOS=darwin GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build -ldflags=-s $V $R -o /build/$NAME-darwin-amd64$R ./$PACK fi if ([ $XGOOS == "." ] || [ $XGOOS == "darwin" ]) && ([ $XGOARCH == "." ] || [ $XGOARCH == "386" ]); then echo "Compiling for darwin/386..." CC=o32-clang CXX=o32-clang++ HOST=i386-apple-darwin10 PREFIX=/usr/local $BUILD_DEPS /deps $LIST_DEPS CC=o32-clang CXX=o32-clang++ GOOS=darwin GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go get -d ./$PACK - CC=o32-clang CXX=o32-clang++ GOOS=darwin GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go build $V -o $NAME-darwin-386 ./$PACK - built_targets=$((built_targets+1)) + CC=o32-clang CXX=o32-clang++ GOOS=darwin GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go build $V -o /build/$NAME-darwin-386 ./$PACK fi done - -# The binary files are the last created files -echo "Moving $built_targets $PACK binaries to host folder..." 
-ls -t | head -n $built_targets -cp `ls -t | head -n $built_targets ` /build - echo "Build process completed" diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/beats-builder/Dockerfile b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/beats-builder/Dockerfile index 48f6ee8c..342bb3e0 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/beats-builder/Dockerfile +++ b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/beats-builder/Dockerfile @@ -1,4 +1,4 @@ -FROM tudorg/xgo-1.7.1 +FROM tudorg/xgo-1.7.4 MAINTAINER Tudor Golubenco diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/build.sh b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/build.sh index 8775a9c5..6c70b820 100755 --- a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/build.sh +++ b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/build.sh @@ -1,5 +1,5 @@ #!/bin/sh docker build --rm=true -t tudorg/xgo-base base/ && \ - docker build --rm=true -t tudorg/xgo-1.7.1 go-1.7.1/ && + docker build --rm=true -t tudorg/xgo-1.7.4 go-1.7.4/ && docker build --rm=true -t tudorg/beats-builder beats-builder diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/go-1.7.4/Dockerfile b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/go-1.7.4/Dockerfile new file mode 100644 index 00000000..4f19a095 --- /dev/null +++ b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/go-1.7.4/Dockerfile @@ -0,0 +1,15 @@ +# Go cross compiler (xgo): Go 1.7.4 layer +# Copyright (c) 2014 Péter Szilágyi. All rights reserved. +# +# Released under the MIT license. + +FROM tudorg/xgo-base + +MAINTAINER Tudor Golubenco + +# Configure the root Go distribution and bootstrap based on it +RUN \ + export ROOT_DIST="https://storage.googleapis.com/golang/go1.7.4.linux-amd64.tar.gz" && \ + export ROOT_DIST_SHA1="2e5baf03d1590e048c84d1d5b4b6f2540efaaea1" && \ + \ + $BOOTSTRAP_PURE diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/binary/run.sh.j2 b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/binary/run.sh.j2 index dbd1d3ff..9ae01a10 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/binary/run.sh.j2 +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/binary/run.sh.j2 @@ -11,7 +11,7 @@ if [ "$SNAPSHOT" = "yes" ]; then fi mkdir /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}} -cp -R homedir/. /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/ +cp -a homedir/. 
/{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/ install -D -m 755 import_dashboards-linux-{{.arch}} /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/scripts/import_dashboards cp {{.beat_name}}-linux-{{.arch}} /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/{{.beat_name}} cp {{.beat_name}}-linux.yml /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/{{.beat_name}}.yml diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/run.sh.j2 b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/run.sh.j2 index 1c35281e..d5df40c6 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/run.sh.j2 +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/run.sh.j2 @@ -24,8 +24,8 @@ RPM_VERSION=`echo ${VERSION} | sed 's/-/_/g'` fpm --force -s dir -t rpm \ -n {{.beat_name}} -v ${RPM_VERSION} \ --architecture {{.rpm_arch}} \ - --vendor "Elastic" \ - --license "ASL 2.0" \ + --vendor "{{.beat_vendor}}" \ + --license "{{.beat_license}}" \ --description "{{.beat_description}}" \ --url {{.beat_url}} \ --rpm-init /tmp/{{.beat_name}}.init \ diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/systemd.j2 b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/systemd.j2 index 457f289f..8f14e9ef 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/systemd.j2 +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/systemd.j2 @@ -1,6 +1,6 @@ [Unit] Description={{.beat_name}} -Documentation=https://www.elastic.co/guide/en/beats/{{.beat_name}}/current/index.html +Documentation={{.beat_doc_url}} Wants=network-online.target After=network-online.target diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/darwin/run.sh.j2 b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/darwin/run.sh.j2 index 8605d2ee..f246aeec 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/darwin/run.sh.j2 +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/darwin/run.sh.j2 @@ -11,7 +11,7 @@ if [ "$SNAPSHOT" = "yes" ]; then fi mkdir /{{.beat_name}}-${VERSION}-darwin-x86_64 -cp -R homedir/. /{{.beat_name}}-${VERSION}-darwin-x86_64/ +cp -a homedir/. 
/{{.beat_name}}-${VERSION}-darwin-x86_64/ install -D -m 755 import_dashboards-darwin-{{.arch}} /{{.beat_name}}-${VERSION}-darwin-x86_64/scripts/import_dashboards cp {{.beat_name}}-darwin-amd64 /{{.beat_name}}-${VERSION}-darwin-x86_64/{{.beat_name}} cp {{.beat_name}}-darwin.yml /{{.beat_name}}-${VERSION}-darwin-x86_64/{{.beat_name}}.yml diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/run.sh.j2 b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/run.sh.j2 index 6815062e..95ff0179 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/run.sh.j2 +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/run.sh.j2 @@ -20,8 +20,8 @@ fi # create deb fpm --force -s dir -t deb \ -n {{.beat_name}} -v ${VERSION} \ - --vendor "Elastic" \ - --license "ASL 2.0" \ + --vendor "{{.beat_vendor}}" \ + --license "{{.beat_license}}" \ --architecture {{.deb_arch}} \ --description "{{.beat_description}}" \ --url {{.beat_url}} \ diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/systemd.j2 b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/systemd.j2 index 457f289f..8f14e9ef 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/systemd.j2 +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/systemd.j2 @@ -1,6 +1,6 @@ [Unit] Description={{.beat_name}} -Documentation=https://www.elastic.co/guide/en/beats/{{.beat_name}}/current/index.html +Documentation={{.beat_doc_url}} Wants=network-online.target After=network-online.target diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/run.sh.j2 b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/run.sh.j2 index 03447404..d35643cd 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/run.sh.j2 +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/run.sh.j2 @@ -11,7 +11,7 @@ if [ "$SNAPSHOT" = "yes" ]; then fi mkdir /{{.beat_name}}-${VERSION}-windows-{{.win_arch}} -cp -R homedir/. /{{.beat_name}}-${VERSION}-windows-{{.win_arch}}/ +cp -a homedir/. 
/{{.beat_name}}-${VERSION}-windows-{{.win_arch}}/ install -D -m 755 import_dashboards-windows-{{.arch}} /{{.beat_name}}-${VERSION}-windows-{{.win_arch}}/scripts/import_dashboards.exe cp {{.beat_name}}-windows-{{.arch}}.exe /{{.beat_name}}-${VERSION}-windows-{{.win_arch}}/{{.beat_name}}.exe unix2dos {{.beat_name}}-win.yml diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/version.yml b/vendor/github.com/elastic/beats/dev-tools/packer/version.yml index 637accb9..29236788 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/version.yml +++ b/vendor/github.com/elastic/beats/dev-tools/packer/version.yml @@ -1 +1 @@ -version: "0.2.0" +version: "5.2.2" diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/xgo-scripts/before_build.sh b/vendor/github.com/elastic/beats/dev-tools/packer/xgo-scripts/before_build.sh index 25a3cb00..9cd82dda 100755 --- a/vendor/github.com/elastic/beats/dev-tools/packer/xgo-scripts/before_build.sh +++ b/vendor/github.com/elastic/beats/dev-tools/packer/xgo-scripts/before_build.sh @@ -46,7 +46,7 @@ fi # with git 1.7 git clone https://github.com/tsg/gotpl.git /go/src/github.com/tsg/gotpl mkdir -p /go/src/gopkg.in/yaml.v2 -#cp -r /go/src/github.com/elastic/beats/vendor/gopkg.in/yaml.v2 /go/src/gopkg.in/ + cp -r $LIBBEAT_PATH/../vendor/gopkg.in/yaml.v2 /go/src/gopkg.in/ go install github.com/tsg/gotpl @@ -54,7 +54,7 @@ go install github.com/tsg/gotpl cat ${LIBBEAT_PATH}/docs/version.asciidoc >> ${PREFIX}/package.yml # Make variable naming of doc-branch compatible with gotpl. Generate and copy README.md into homedir sed -i -e 's/:doc-branch/doc_branch/g' ${PREFIX}/package.yml -/go/bin/gotpl ${LIBBEAT_PATH}/../dev-tools/packer/readme.md.j2 < ${PREFIX}/package.yml > ${PREFIX}/homedir/README.md +/go/bin/gotpl /templates/readme.md.j2 < ${PREFIX}/package.yml > ${PREFIX}/homedir/README.md # Copy template cp $BEATNAME.template.json $PREFIX/$BEATNAME.template.json diff --git a/vendor/github.com/elastic/beats/dev-tools/run_with_go_ver b/vendor/github.com/elastic/beats/dev-tools/run_with_go_ver new file mode 100755 index 00000000..0e981023 --- /dev/null +++ b/vendor/github.com/elastic/beats/dev-tools/run_with_go_ver @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# +# This sets up Go based on the project's Go version. It will configure +# GOROOT and add GOROOT/bin to PATH before executing the given command. 
+# +# Example usage: ./run_with_go_ver go version +# +set -e + +# Script directory: +SDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +source "${SDIR}/common.bash" + +get_go_version +setup_go_root ${GO_VERSION} +bash -c "$*" diff --git a/vendor/github.com/elastic/beats/dev-tools/set_version b/vendor/github.com/elastic/beats/dev-tools/set_version index dfabee3b..30a7414a 100755 --- a/vendor/github.com/elastic/beats/dev-tools/set_version +++ b/vendor/github.com/elastic/beats/dev-tools/set_version @@ -1,38 +1,70 @@ #!/usr/bin/env python -import os import argparse +import os +import re +import sys from subprocess import check_call -template_go = '''package beat +vendored_libbeat = os.path.normpath("vendor/github.com/elastic/beats") -const defaultBeatVersion = "{}" + +goversion_template = '''package main + +const appVersion = "{version}" ''' -template_packer = '''version: "{version}" +goversion_template_libbeat = '''package beat + +const defaultBeatVersion = "{version}" ''' +yamlversion_template = '''version: "{version}" +''' + +def get_rootfolder(): + vendored_libbeat = os.path.normpath("vendor/github.com/elastic/beats") + script_directory = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + index = script_directory.find(vendored_libbeat) + if index > 0: + # Community beat detected, version files are stored at the root folder of the project + return os.path.abspath(script_directory[:index]) + + # Libbeat detected + return os.path.dirname(script_directory) + +def create_from_template(filename, template, version): + with open(filename, "w") as f: + f.write(template.format(version=version)) + print ("Set version {} in file {}".format(version, filename)) + def main(): parser = argparse.ArgumentParser( description="Used to set the current version. 
Doesn't commit changes.") parser.add_argument("version", help="The new version") args = parser.parse_args() - - dir = os.path.dirname(os.path.realpath(__file__)) - with open(dir + "/../libbeat/beat/version.go", "w") as f: - f.write(template_go.format(args.version)) - version = args.version - with open(dir + "/packer/version.yml", "w") as f: - f.write(template_packer.format( - version=version, - )) - # Updates all files with the new templates - os.chdir(dir + "/../") + is_libbeat = vendored_libbeat not in os.path.realpath(__file__) + if is_libbeat: + goversion_filepath = os.path.join(get_rootfolder(), "libbeat","beat", "version.go") + ymlversion_filepath = os.path.join(get_rootfolder(), "dev-tools", "packer", "version.yml") + go_template = goversion_template_libbeat + else: + goversion_filepath = os.path.join(get_rootfolder(), "version.go") + ymlversion_filepath = os.path.join(get_rootfolder(), "version.yml") + go_template = goversion_template + + # Create version.go and version.yml files + create_from_template(goversion_filepath, go_template, version) + create_from_template(ymlversion_filepath, yamlversion_template, version) + + # Updates all version files with the new templates + os.chdir(get_rootfolder()) print("Update build files") check_call("make update", shell=True) if __name__ == "__main__": main() + diff --git a/vendor/github.com/elastic/beats/filebeat/.gitignore b/vendor/github.com/elastic/beats/filebeat/.gitignore index 8144f8a2..6af3ddf1 100644 --- a/vendor/github.com/elastic/beats/filebeat/.gitignore +++ b/vendor/github.com/elastic/beats/filebeat/.gitignore @@ -5,4 +5,5 @@ filebeat build +_meta/kibana /tests/load/logs diff --git a/vendor/github.com/elastic/beats/filebeat/Dockerfile b/vendor/github.com/elastic/beats/filebeat/Dockerfile new file mode 100644 index 00000000..0cb92d48 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/Dockerfile @@ -0,0 +1,14 @@ +FROM golang:1.7.4 +MAINTAINER Nicolas Ruflin + +RUN set -x && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + netcat python-pip virtualenv && \ + apt-get clean + +# Setup work environment +ENV FILEBEAT_PATH /go/src/github.com/elastic/beats/filebeat + +RUN mkdir -p $FILEBEAT_PATH/build/coverage +WORKDIR $FILEBEAT_PATH diff --git a/vendor/github.com/elastic/beats/filebeat/Makefile b/vendor/github.com/elastic/beats/filebeat/Makefile index eab8ca54..b8966eaa 100644 --- a/vendor/github.com/elastic/beats/filebeat/Makefile +++ b/vendor/github.com/elastic/beats/filebeat/Makefile @@ -3,7 +3,7 @@ BEATNAME?=filebeat BEAT_DESCRIPTION?=Filebeat sends log files to Logstash or directly to Elasticsearch. SYSTEM_TESTS=true -TEST_ENVIRONMENT=false +TEST_ENVIRONMENT?=true GOX_FLAGS='-arch=amd64 386 arm ppc64 ppc64le' include ../libbeat/scripts/Makefile @@ -12,6 +12,32 @@ include ../libbeat/scripts/Makefile .PHONY: before-build before-build: -# Collects all dependencies and then calls update +# Collects all module dashboards +.PHONY: kibana +kibana: + # To not remove index-pattern as generated by update + -rm -r _meta/kibana/dashboard _meta/kibana/search _meta/kibana/visualization + mkdir -p _meta/kibana + -cp -r module/*/_meta/kibana _meta/ + +# Collects all module and dataset fields +.PHONY: fields +fields: + mkdir -p _meta/ + cat ${ES_BEATS}/filebeat/_meta/fields.common.yml > _meta/fields.generated.yml + . 
${PYTHON_ENV}/bin/activate; python ${ES_BEATS}/metricbeat/scripts/fields_collector.py >> _meta/fields.generated.yml + + +# Runs all collection steps and updates afterwards .PHONY: collect collect: + # dummy implementation to avoid adding the modules until released + mkdir -p _meta/ + cat ${ES_BEATS}/filebeat/_meta/fields.common.yml > _meta/fields.generated.yml + + +# Creates a new fileset. Requires the params MODULE and FILESET +.PHONY: create-fileset +create-fileset: python-env + . ${PYTHON_ENV}/bin/activate; python ${ES_BEATS}/filebeat/scripts/create_fileset.py --path=$(PWD) --es_beats=$(ES_BEATS) --module=$(MODULE) --fileset=$(FILESET) + diff --git a/vendor/github.com/elastic/beats/filebeat/_meta/beat.full.yml b/vendor/github.com/elastic/beats/filebeat/_meta/beat.full.yml index 265d4627..7de2e652 100644 --- a/vendor/github.com/elastic/beats/filebeat/_meta/beat.full.yml +++ b/vendor/github.com/elastic/beats/filebeat/_meta/beat.full.yml @@ -205,6 +205,9 @@ filebeat.prospectors: # Note: Potential data loss. Make sure to read and understand the docs for this option. #close_timeout: 0 + # Defines if prospectors is enabled + #enabled: true + #----------------------------- Stdin prospector ------------------------------- # Configuration to use stdin input #- input_type: stdin diff --git a/vendor/github.com/elastic/beats/filebeat/_meta/fields.common.yml b/vendor/github.com/elastic/beats/filebeat/_meta/fields.common.yml new file mode 100644 index 00000000..84fc532a --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/_meta/fields.common.yml @@ -0,0 +1,34 @@ +- key: log + title: Log File Content + description: > + Contains log file lines. + fields: + - name: source + type: keyword + required: true + description: > + The file from which the line was read. This field contains the full path to the file. + For example: `/var/log/system.log`. + + - name: offset + type: long + required: false + description: > + The file offset the reported line starts at. + + - name: message + type: text + ignore_above: 0 + required: true + description: > + The content of the line read from the log file. + + - name: type + required: true + description: > + The name of the log event. This field is set to the value specified for the `document_type` option in the prospector section of the Filebeat config file. + + - name: input_type + required: true + description: > + The input type from which the event was generated. This field is set to the value specified for the `input_type` option in the prospector section of the Filebeat config file. diff --git a/vendor/github.com/elastic/beats/filebeat/crawler/crawler.go b/vendor/github.com/elastic/beats/filebeat/crawler/crawler.go index 902ff740..7729c254 100644 --- a/vendor/github.com/elastic/beats/filebeat/crawler/crawler.go +++ b/vendor/github.com/elastic/beats/filebeat/crawler/crawler.go @@ -40,7 +40,9 @@ func (c *Crawler) Start(states file.States, once bool) error { if err != nil { return fmt.Errorf("Error in initing prospector: %s", err) } - c.prospectors = append(c.prospectors, prospector) + if prospector.IsEnabled() { + c.prospectors = append(c.prospectors, prospector) + } } logp.Info("Loading Prospectors completed. 
Number of prospectors: %v", len(c.prospectors)) diff --git a/vendor/github.com/elastic/beats/filebeat/docker-compose.yml b/vendor/github.com/elastic/beats/filebeat/docker-compose.yml new file mode 100644 index 00000000..8eef2072 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/docker-compose.yml @@ -0,0 +1,18 @@ +version: '2' +services: + beat: + build: ${PWD}/. + depends_on: + - elasticsearch + env_file: + - ${PWD}/build/test.env + working_dir: /go/src/github.com/elastic/beats/filebeat + volumes: + - ${PWD}/..:/go/src/github.com/elastic/beats/ + command: make + entrypoint: /go/src/github.com/elastic/beats/filebeat/docker-entrypoint.sh + + elasticsearch: + extends: + file: ../testing/environments/${TESTING_ENVIRONMENT}.yml + service: elasticsearch diff --git a/vendor/github.com/elastic/beats/filebeat/docker-entrypoint.sh b/vendor/github.com/elastic/beats/filebeat/docker-entrypoint.sh new file mode 100755 index 00000000..73e6d534 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/docker-entrypoint.sh @@ -0,0 +1,69 @@ +#!/bin/bash +set -e + +# This script is the entrypoint to the filebeat Docker container. This will +# verify that all services are running before executing the command provided +# to the docker container. + +setDefaults() { + # Use default ports and hosts if not specified. + : ${ES_HOST:=localhost} + : ${ES_PORT:=9200} +} + +es_url() { + local auth + + auth="" + if [ -n "$ES_USER" ]; then + auth="$ES_USER" + if [ -n "$ES_PASS" ]; then + auth="$auth:$ES_PASS" + fi + auth="$auth@" + fi + + if [ "$SHIELD" == "true" ]; then + code=$(curl --write-out "%{http_code}\n" --silent --output /dev/null "http://${ES_HOST}:${ES_PORT}/") + + if [ $code != 401 ]; then + echo "Shield does not seem to be running" + exit 1 + fi + fi + echo "http://${auth}${ES_HOST}:${ES_PORT}" +} + +# Wait for elasticsearch to start. It requires that the status be either +# green or yellow. +waitForElasticsearch() { + echo -n "Waiting on elasticsearch($(es_url)) to start." + for ((i=1;i<=60;i++)) + do + health=$(curl --silent "$(es_url)/_cat/health" | awk '{print $4}') + if [[ "$health" == "green" ]] || [[ "$health" == "yellow" ]] + then + echo + echo "Elasticsearch is ready!" + return 0 + fi + + echo -n '.' + sleep 1 + done + + echo + echo >&2 'Elasticsearch is not running or is not healthy.' + echo >&2 "Address: $(es_url)" + echo >&2 "$health" + exit 1 +} + +# Main +setDefaults + +# Services need to test outputs +# Wait until all services are started +waitForElasticsearch + +exec "$@" diff --git a/vendor/github.com/elastic/beats/filebeat/docs/getting-started.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/getting-started.asciidoc index 9651cf58..08f76859 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/getting-started.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/getting-started.asciidoc @@ -30,7 +30,7 @@ mac>> for OS X, and <> for Windows). [NOTE] ================================================== -If you use Apt or Yum, you can {libbeat}/setup-repositories.html[install Filebeat from our repositories] to update to the newest version more easily. +If you use Apt or Yum, you can <> to update to the newest version more easily. See our https://www.elastic.co/downloads/beats/filebeat[download page] for other installation options, such as 32-bit images. @@ -94,7 +94,8 @@ see <>. To configure Filebeat, you edit the configuration file. For rpm and deb, you'll find the configuration file at `/etc/filebeat/filebeat.yml`. 
For mac and win, look in -the archive that you just extracted. +the archive that you just extracted. There’s also a full example configuration file +called `filebeat.full.yml` that shows all non-deprecated options. Here is a sample of the `filebeat` section of the `filebeat.yml` file. Filebeat uses predefined default values for most configuration options. @@ -166,6 +167,10 @@ include::../../libbeat/docs/shared-template-load.asciidoc[] Start Filebeat by issuing the appropriate command for your platform. +NOTE: If you use an init.d script to start Filebeat on deb or rpm, you can't +specify command line flags (see <>). To specify flags, +start Filebeat in the foreground. + *deb:* [source,shell] @@ -203,7 +208,12 @@ Filebeat is now ready to send log files to your defined output. We don't offer prebuilt dashboards for visualizing Filebeat data. However, to make it easier for you to explore Filebeat data in Kibana, we've created a Filebeat index pattern: `filebeat-*`. To load this pattern, you can use the script -that's provided for importing dashboards. +that's provided for importing dashboards: + +[source,shell] +---------------------------------------------------------------------- +./scripts/import_dashboards -only-index +---------------------------------------------------------------------- For more information about running the script, see {libbeat}/import-dashboards.html[Importing Existing Beat Dashboards] in the _Beats Platform Reference_. diff --git a/vendor/github.com/elastic/beats/filebeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/index.asciidoc index b8ff30cb..6b6682e5 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/index.asciidoc @@ -1,6 +1,6 @@ = Filebeat Reference -include::./version.asciidoc[] +include::../../libbeat/docs/version.asciidoc[] :libbeat: http://www.elastic.co/guide/en/beats/libbeat/{doc-branch} :packetbeat: http://www.elastic.co/guide/en/beats/packetbeat/{doc-branch} @@ -8,7 +8,7 @@ include::./version.asciidoc[] :filebeat: http://www.elastic.co/guide/en/beats/filebeat/{doc-branch} :winlogbeat: http://www.elastic.co/guide/en/beats/winlogbeat/{doc-branch} :elasticsearch: https://www.elastic.co/guide/en/elasticsearch/reference/{doc-branch} -:securitydoc: https://www.elastic.co/guide/en/x-pack/5.0 +:securitydoc: https://www.elastic.co/guide/en/x-pack/5.2 :version: {stack-version} :beatname_lc: filebeat :beatname_uc: Filebeat @@ -22,6 +22,8 @@ include::./command-line.asciidoc[] include::../../libbeat/docs/shared-directory-layout.asciidoc[] +include::../../libbeat/docs/repositories.asciidoc[] + include::./upgrading.asciidoc[] include::./how-filebeat-works.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/docs/reference/configuration/filebeat-options.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/reference/configuration/filebeat-options.asciidoc index 2e384f87..345160f6 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/reference/configuration/filebeat-options.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/reference/configuration/filebeat-options.asciidoc @@ -240,7 +240,9 @@ When this option is enabled, Filebeat closes a file as soon as the end of a file WARNING: Only use this option if you understand that data loss is a potential side effect. Another side effect is that multiline events might not be completely sent before the timeout expires. -When this option is enabled, Filebeat gives every harvester a predefined lifetime. 
Regardless of where the reader is in the file, reading will stop after the `close_timeout` period has elapsed. This option can be useful for older log files when you want to spend only a predefined amount of time on the files. If you set `close_timeout` to equal `ignore_older`, the file will not be picked up if it's modified while the harvester is closed. This combination of settings normally leads to data loss, and the complete file is not sent. +When this option is enabled, Filebeat gives every harvester a predefined lifetime. Regardless of where the reader is in the file, reading will stop after the `close_timeout` period has elapsed. This option can be useful for older log files when you want to spend only a predefined amount of time on the files. While `close_timeout` will close the file after the predefined timeout, if the file is still being updated, the prospector will start a new harvester again per the defined `scan_frequency`. And the close_timeout for this harvester will start again with the countdown for the timeout. + +If you set `close_timeout` to equal `ignore_older`, the file will not be picked up if it's modified while the harvester is closed. This combination of settings normally leads to data loss, and the complete file is not sent. When you use `close_timeout` for logs that contain multiline events, the harvester might stop in the middle of a multiline event, which means that only parts of the event will be sent. If the harvester is started again and the file still exists, only the second part of the event will be sent. @@ -320,16 +322,11 @@ Example configuration: [source,yaml] ------------------------------------------------------------------------------------- -json.message_key: log json.keys_under_root: true json.add_error_key: true +json.message_key: log ------------------------------------------------------------------------------------- - -*`message_key`*:: JSON key on which to apply the line filtering and multiline settings. This key must be top level -and its value must be string, otherwise it is ignored. If no text key is defined, the line -filtering and multiline features cannot be used. - *`keys_under_root`*:: By default, the decoded JSON is placed under a "json" key in the output document. If you enable this setting, the keys are copied top level in the output document. The default is false. @@ -337,7 +334,14 @@ If you enable this setting, the keys are copied top level in the output document JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) in case of conflicts. *`add_error_key`*:: If this setting is enabled, Filebeat adds a "json_error" key in case of JSON -unmarshaling errors or when a text key is defined in the configuration but cannot be used. +unmarshalling errors or when a `message_key` is defined in the configuration but cannot be used. + +*`message_key`*:: An optional configuration setting that specifies a JSON key on +which to apply the line filtering and multiline settings. If specified the +key must be at the top level in the JSON object and the value associated with +the key must be a string, otherwise no filtering or multiline aggregation will +occur. + [[multiline]] ===== multiline @@ -460,6 +464,10 @@ Currently if a new harvester can be started again, the harvester is picked rando This configuration option applies per prospector. You can use this option to indirectly set higher priorities on certain prospectors by assigning a higher limit of harvesters. 
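Editor's note: to tie the prospector-level options discussed above together, here is a minimal, hypothetical sketch (paths and values are illustrative only, not part of this patch) that combines the JSON decoding, multiline, `close_timeout`, and `harvester_limit` settings documented in this section:

[source,yaml]
-------------------------------------------------------------------------------------
filebeat.prospectors:
- input_type: log
  paths:
    - /var/log/myapp/*.log        # illustrative path only
  close_timeout: 5m               # stop each harvester after 5 minutes
  harvester_limit: 10             # at most 10 parallel harvesters for this prospector
  json.keys_under_root: true      # copy decoded JSON keys to the top level
  json.add_error_key: true        # add json_error on decoding problems
  json.message_key: log           # apply filtering/multiline to this key
  multiline.pattern: '^\['        # example pattern; lines not starting with '[' are appended
  multiline.negate: true
  multiline.match: after
-------------------------------------------------------------------------------------

With this combination, multiline aggregation is applied to the decoded `log` field, and each file is released after at most five minutes regardless of read position; if the file keeps growing, a new harvester is started again on the next `scan_frequency`.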
+===== enabled + +The `enabled` option can be used with each prospector to define if a prospector is enabled or not. By default, enabled is set to true. + [[configuration-global-options]] === Filebeat Global Configuration @@ -503,7 +511,7 @@ filebeat.idle_timeout: 5s ===== registry_file The name of the registry file. If a relative path is used, it is considered relative to the -data path. See the <> section for details. The default is `registry`. +data path. See the <> section for details. The default is `${path.data}/registry`. [source,yaml] ------------------------------------------------------------------------------------- @@ -512,6 +520,9 @@ filebeat.registry_file: registry It is not possible to use a symlink as registry file. +NOTE: The registry file is only updated when new events are flushed and not on a predefined period. +That means in case there are some states where the TTL expired, these are only removed when new event are processed. + ===== config_dir diff --git a/vendor/github.com/elastic/beats/filebeat/docs/securing-filebeat.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/securing-filebeat.asciidoc index df525cf5..d1ccfbb6 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/securing-filebeat.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/securing-filebeat.asciidoc @@ -9,12 +9,18 @@ The following topics describe how to secure communication between Filebeat and o * <> * <> +//sets block macro for https.asciidoc included in next section + -- [[securing-communication-elasticsearch]] == Securing Communication With Elasticsearch + include::../../libbeat/docs/https.asciidoc[] +//sets block macro for shared-ssl-logstash-config.asciidoc included in next section + [[configuring-ssl-logstash]] == Securing Communication With Logstash by Using SSL + include::../../libbeat/docs/shared-ssl-logstash-config.asciidoc[] \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/docs/troubleshooting.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/troubleshooting.asciidoc index c925d521..ea6f8a57 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/troubleshooting.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/troubleshooting.asciidoc @@ -10,6 +10,8 @@ following tips: * <> * <> +//sets block macro for getting-help.asciidoc included in next section + -- [[getting-help]] @@ -17,6 +19,8 @@ following tips: include::../../libbeat/docs/getting-help.asciidoc[] +//sets block macro for debugging.asciidoc included in next section + [[enable-filebeat-debugging]] == Debugging diff --git a/vendor/github.com/elastic/beats/filebeat/filebeat.full.yml b/vendor/github.com/elastic/beats/filebeat/filebeat.full.yml index 7eab33f1..4fec95ec 100644 --- a/vendor/github.com/elastic/beats/filebeat/filebeat.full.yml +++ b/vendor/github.com/elastic/beats/filebeat/filebeat.full.yml @@ -205,6 +205,9 @@ filebeat.prospectors: # Note: Potential data loss. Make sure to read and understand the docs for this option. 
#close_timeout: 0 + # Defines if prospectors is enabled + #enabled: true + #----------------------------- Stdin prospector ------------------------------- # Configuration to use stdin input #- input_type: stdin diff --git a/vendor/github.com/elastic/beats/filebeat/filebeat.py b/vendor/github.com/elastic/beats/filebeat/filebeat.py new file mode 100755 index 00000000..f7963093 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/filebeat.py @@ -0,0 +1,197 @@ +#!/usr/bin/env python +import argparse +import sys +import os +import yaml +import requests +import tempfile +import subprocess +import socket +from jinja2 import Template + + +def main(): + parser = argparse.ArgumentParser( + description="PROTOTYPE: start filebeat with a module configuration") + parser.add_argument("--modules", default="", + help="From branch") + parser.add_argument("--es", default="http://localhost:9200", + help="Elasticsearch URL") + parser.add_argument("--index", default=None, + help="Elasticsearch index") + parser.add_argument("--registry", default=None, + help="Registry file to use") + parser.add_argument("-M", nargs="*", type=str, default=None, + help="Variables overrides. e.g. path=/test") + parser.add_argument("--once", action="store_true", + help="Run filebeat with the -once flag") + + args = parser.parse_args() + print args + + # changing directory because we use paths relative to the binary + os.chdir(os.path.dirname(sys.argv[0])) + + modules = args.modules.split(",") + if len(modules) == 0: + print("You need to specify at least a module") + sys.exit(1) + + load_dashboards(args) + load_datasets(args, modules) + + +def load_dashboards(args): + cmd = ["../libbeat/dashboards/import_dashboards", + "-dir", "_meta/kibana", + "-es", args.es] + subprocess.Popen(cmd).wait() + + +def load_datasets(args, modules): + prospectors = "" + for module in modules: + path = os.path.join("module", module) + if not os.path.isdir(path): + print("Module {} not found".format(module)) + sys.exit(1) + print("Found module {} in {}".format(module, path)) + + filesets = [name for name in os.listdir(path) if + os.path.isfile(os.path.join(path, name, "manifest.yml"))] + + print("Found filesets: {}".format(filesets)) + + for fileset in filesets: + prospectors += load_fileset(args, module, fileset, + os.path.join(path, fileset)) + + print("Generated configuration: {}".format(prospectors)) + run_filebeat(args, prospectors) + + +def load_fileset(args, module, fileset, path): + manifest = yaml.load(file(os.path.join(path, "manifest.yml"), "r")) + var = evaluate_vars(args, manifest["vars"], module, fileset) + var["beat"] = dict(module=module, fileset=fileset, path=path, args=args) + print("Evaluated variables: {}".format(var)) + + load_pipeline(var, manifest["ingest_pipeline"]) + generate_prospectors(var, manifest["prospectors"]) + + return var["beat"]["prospectors"] + + +def evaluate_vars(args, var_in, module, fileset): + var = { + "builtin": get_builtin_vars() + } + for name, vals in var_in.items(): + var[name] = vals["default"] + if sys.platform == "darwin" and "os.darwin" in vals: + var[name] = vals["os.darwin"] + elif sys.platform == "windows" and "os.windows" in vals: + var[name] = vals["os.windows"] + + if isinstance(var[name], basestring): + var[name] = Template(var[name]).render(var) + elif isinstance(var[name], list): + # only supports array of strings atm + var[name] = [Template(x).render(var) for x in var[name]] + + # overrides + if args.M is not None: + for pair in args.M: + key, val = pair.partition("=")[::2] + if 
key.startswith("{}.{}.".format(module, fileset)): + key = key[len("{}.{}.".format(module, fileset)):] + + # this is a hack in the prototype only, because + # here we don't know the type of each variable type. + if key == "paths": + val = val.split(",") + var[key] = val + + return var + + +def get_builtin_vars(): + host = socket.gethostname() + hostname, _, domain = host.partition(".") + # separate the domain + return { + "hostname": hostname, + "domain": domain + } + + +def load_pipeline(var, pipeline): + path = os.path.join(var["beat"]["path"], Template(pipeline).render(var)) + print("Loading ingest pipeline: {}".format(path)) + var["beat"]["pipeline_id"] = var["beat"]["module"] + '-' + var["beat"]["fileset"] + \ + '-' + os.path.splitext(os.path.basename(path))[0] + print("Pipeline id: {}".format(var["beat"]["pipeline_id"])) + + with open(path, "r") as f: + contents = f.read() + + r = requests.put("{}/_ingest/pipeline/{}" + .format(var["beat"]["args"].es, + var["beat"]["pipeline_id"]), + data=contents) + if r.status_code >= 300: + print("Error posting pipeline: {}".format(r.text)) + sys.exit(1) + + +def run_filebeat(args, prospectors): + cfg_template = """ +filebeat.prospectors: +{{prospectors}} + +output.elasticsearch.hosts: ["{{es}}"] +output.elasticsearch.pipeline: "%{[fields.pipeline_id]}" +""" + if args.index: + cfg_template += "\noutput.elasticsearch.index: {}".format(args.index) + + if args.once: + cfg_template += "\nfilebeat.idle_timeout: 0.5s" + + if args.registry: + cfg_template += "\nfilebeat.registry_file: {}".format(args.registry) + + fd, fname = tempfile.mkstemp(suffix=".yml", prefix="filebeat-", + text=True) + with open(fname, "w") as cfgfile: + cfgfile.write(Template(cfg_template).render( + dict(prospectors=prospectors, es=args.es))) + print("Wrote configuration file: {}".format(cfgfile.name)) + os.close(fd) + + cmd = ["./filebeat.test", "-systemTest", + "-e", "-c", cfgfile.name, "-d", "*"] + if args.once: + cmd.append("-once") + print("Starting filebeat: " + " ".join(cmd)) + + subprocess.Popen(cmd).wait() + + +def generate_prospectors(var, prospectors): + var["beat"]["prospectors"] = "" + for pr in prospectors: + path = os.path.join(var["beat"]["path"], Template(pr).render(var)) + with open(path, "r") as f: + contents = Template(f.read()).render(var) + if var["beat"]["args"].once: + contents += "\n close_eof: true" + contents += "\n scan_frequency: 0.2s" + if "multiline" in contents: + contents += "\n multiline.timeout: 0.2s" + + var["beat"]["prospectors"] += "\n" + contents + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/vendor/github.com/elastic/beats/filebeat/filebeat.template-es2x.json b/vendor/github.com/elastic/beats/filebeat/filebeat.template-es2x.json index f8faf52c..5c034a41 100644 --- a/vendor/github.com/elastic/beats/filebeat/filebeat.template-es2x.json +++ b/vendor/github.com/elastic/beats/filebeat/filebeat.template-es2x.json @@ -7,7 +7,7 @@ } }, "_meta": { - "version": "5.1.2" + "version": "5.2.2" }, "dynamic_templates": [ { diff --git a/vendor/github.com/elastic/beats/filebeat/filebeat.template.json b/vendor/github.com/elastic/beats/filebeat/filebeat.template.json index 4b589c46..8013ef82 100644 --- a/vendor/github.com/elastic/beats/filebeat/filebeat.template.json +++ b/vendor/github.com/elastic/beats/filebeat/filebeat.template.json @@ -5,7 +5,7 @@ "norms": false }, "_meta": { - "version": "5.1.2" + "version": "5.2.2" }, "dynamic_templates": [ { @@ -98,6 +98,7 @@ }, "order": 0, "settings": { + "index.mapping.total_fields.limit": 10000, 
"index.refresh_interval": "5s" }, "template": "filebeat-*" diff --git a/vendor/github.com/elastic/beats/filebeat/input/event.go b/vendor/github.com/elastic/beats/filebeat/input/event.go index 198e2c6c..415de417 100644 --- a/vendor/github.com/elastic/beats/filebeat/input/event.go +++ b/vendor/github.com/elastic/beats/filebeat/input/event.go @@ -1,13 +1,12 @@ package input import ( - "fmt" "time" "github.com/elastic/beats/filebeat/harvester/reader" "github.com/elastic/beats/filebeat/input/file" "github.com/elastic/beats/libbeat/common" - "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/libbeat/common/jsontransform" ) // Event is sent to the output and must contain all relevant information @@ -80,43 +79,6 @@ func mergeJSONFields(e *Event, event common.MapStr, jsonFields common.MapStr) { // Delete existing json key delete(event, "json") - for k, v := range jsonFields { - if e.JSONConfig.OverwriteKeys { - if k == "@timestamp" { - vstr, ok := v.(string) - if !ok { - logp.Err("JSON: Won't overwrite @timestamp because value is not string") - event[reader.JsonErrorKey] = "@timestamp not overwritten (not string)" - continue - } - - // @timestamp must be of format RFC3339 - ts, err := time.Parse(time.RFC3339, vstr) - if err != nil { - logp.Err("JSON: Won't overwrite @timestamp because of parsing error: %v", err) - event[reader.JsonErrorKey] = fmt.Sprintf("@timestamp not overwritten (parse error on %s)", vstr) - continue - } - event[k] = common.Time(ts) - } else if k == "type" { - vstr, ok := v.(string) - if !ok { - logp.Err("JSON: Won't overwrite type because value is not string") - event[reader.JsonErrorKey] = "type not overwritten (not string)" - continue - } - if len(vstr) == 0 || vstr[0] == '_' { - logp.Err("JSON: Won't overwrite type because value is empty or starts with an underscore") - event[reader.JsonErrorKey] = fmt.Sprintf("type not overwritten (invalid value [%s])", vstr) - continue - } - event[k] = vstr - } else { - event[k] = v - } - } else if _, exists := event[k]; !exists { - event[k] = v - } - } + jsontransform.WriteJSONKeys(event, jsonFields, e.JSONConfig.OverwriteKeys, reader.JsonErrorKey) } } diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/fields.yml b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/fields.yml new file mode 100644 index 00000000..4cafbcd8 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/fields.yml @@ -0,0 +1,10 @@ +- key: mysql + title: "MySQL" + description: > + Module for parsing the MySQL log files. + fields: + - name: mysql + type: group + description: > + Fields from the MySQL log files. 
+ fields: diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/dashboard/Filebeat-MySQL-Dashboard.json b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/dashboard/Filebeat-MySQL-Dashboard.json new file mode 100644 index 00000000..56e68d6c --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/dashboard/Filebeat-MySQL-Dashboard.json @@ -0,0 +1,13 @@ +{ + "hits": 0, + "timeRestore": false, + "description": "", + "title": "Filebeat MySQL Dashboard", + "uiStateJSON": "{\"P-1\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}}", + "panelsJSON": "[{\"col\":1,\"id\":\"MySQL-slowest-queries\",\"panelIndex\":1,\"row\":8,\"size_x\":6,\"size_y\":4,\"type\":\"visualization\"},{\"col\":1,\"id\":\"MySQL-Slow-queries-over-time\",\"panelIndex\":2,\"row\":1,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"id\":\"MySQL-error-logs\",\"panelIndex\":3,\"row\":1,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"columns\":[\"mysql.error.level\",\"mysql.error.message\"],\"id\":\"Filebeat-MySQL-error-log\",\"panelIndex\":4,\"row\":8,\"size_x\":6,\"size_y\":5,\"sort\":[\"@timestamp\",\"desc\"],\"type\":\"search\"},{\"col\":7,\"id\":\"MySQL-Error-logs-levels\",\"panelIndex\":5,\"row\":4,\"size_x\":6,\"size_y\":4,\"type\":\"visualization\"},{\"col\":1,\"id\":\"MySQL-Slow-logs-by-count\",\"panelIndex\":6,\"row\":4,\"size_x\":6,\"size_y\":4,\"type\":\"visualization\"}]", + "optionsJSON": "{\"darkTheme\":false}", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}" + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/search/Filebeat-MySQL-Slow-log.json b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/search/Filebeat-MySQL-Slow-log.json new file mode 100644 index 00000000..ab26f37b --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/search/Filebeat-MySQL-Slow-log.json @@ -0,0 +1,16 @@ +{ + "sort": [ + "@timestamp", + "desc" + ], + "hits": 0, + "description": "", + "title": "Filebeat MySQL Slow log", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"index\":\"filebeat-*\",\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"_exists_:mysql.slowlog\"}},\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647}}" + }, + "columns": [ + "_source" + ] +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/search/Filebeat-MySQL-error-log.json b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/search/Filebeat-MySQL-error-log.json new file mode 100644 index 00000000..1e486476 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/search/Filebeat-MySQL-error-log.json @@ -0,0 +1,17 @@ +{ + "sort": [ + "@timestamp", + "desc" + ], + "hits": 0, + "description": "", + "title": "Filebeat MySQL error log", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": 
"{\"index\":\"filebeat-*\",\"query\":{\"query_string\":{\"query\":\"_exists_:mysql.error\",\"analyze_wildcard\":true}},\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647}}" + }, + "columns": [ + "mysql.error.level", + "mysql.error.message" + ] +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/visualization/MySQL-Error-logs-levels.json b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/visualization/MySQL-Error-logs-levels.json new file mode 100644 index 00000000..7c0c4c1a --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/visualization/MySQL-Error-logs-levels.json @@ -0,0 +1,11 @@ +{ + "visState": "{\"title\":\"MySQL Error logs levels\",\"type\":\"pie\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"bottom\",\"isDonut\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"mysql.error.level\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", + "description": "", + "title": "MySQL Error logs levels", + "uiStateJSON": "{\"vis\":{\"colors\":{\"Note\":\"#9AC48A\",\"Warning\":\"#F9934E\",\"ERROR\":\"#E24D42\"}}}", + "version": 1, + "savedSearchId": "Filebeat-MySQL-error-log", + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"filter\":[]}" + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/visualization/MySQL-Slow-logs-by-count.json b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/visualization/MySQL-Slow-logs-by-count.json new file mode 100644 index 00000000..f9ad3ced --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/visualization/MySQL-Slow-logs-by-count.json @@ -0,0 +1,11 @@ +{ + "visState": "{\"title\":\"MySQL Slow logs by count\",\"type\":\"pie\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"bottom\",\"isDonut\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"mysql.slowlog.query\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", + "description": "", + "title": "MySQL Slow logs by count", + "uiStateJSON": "{}", + "version": 1, + "savedSearchId": "Filebeat-MySQL-Slow-log", + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"filter\":[]}" + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/visualization/MySQL-Slow-queries-over-time.json b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/visualization/MySQL-Slow-queries-over-time.json new file mode 100644 index 00000000..ce85abcf --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/visualization/MySQL-Slow-queries-over-time.json @@ -0,0 +1,11 @@ +{ + "visState": "{\"title\":\"MySQL Slow queries over 
time\",\"type\":\"histogram\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"scale\":\"linear\",\"mode\":\"stacked\",\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"yAxis\":{}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"Slow queries\"}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}}],\"listeners\":{}}", + "description": "", + "title": "MySQL Slow queries over time", + "uiStateJSON": "{\"vis\":{\"colors\":{\"Slow queries\":\"#EF843C\"}}}", + "version": 1, + "savedSearchId": "Filebeat-MySQL-Slow-log", + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"filter\":[]}" + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/visualization/MySQL-error-logs.json b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/visualization/MySQL-error-logs.json new file mode 100644 index 00000000..99ff00e2 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/visualization/MySQL-error-logs.json @@ -0,0 +1,11 @@ +{ + "visState": "{\"title\":\"MySQL error logs\",\"type\":\"histogram\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"scale\":\"linear\",\"mode\":\"stacked\",\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"yAxis\":{}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"Error logs\"}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}}],\"listeners\":{}}", + "description": "", + "title": "MySQL error logs", + "uiStateJSON": "{\"vis\":{\"colors\":{\"Count\":\"#447EBC\",\"Error logs\":\"#1F78C1\"}}}", + "version": 1, + "savedSearchId": "Filebeat-MySQL-error-log", + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"filter\":[]}" + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/visualization/MySQL-slowest-queries.json b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/visualization/MySQL-slowest-queries.json new file mode 100644 index 00000000..82ce2aca --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/kibana/visualization/MySQL-slowest-queries.json @@ -0,0 +1,11 @@ +{ + "visState": "{\"title\":\"MySQL slowest queries\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"max\",\"schema\":\"metric\",\"params\":{\"field\":\"mysql.slowlog.query_time.sec\",\"customLabel\":\"Query 
time\"}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"mysql.slowlog.query\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Query\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"mysql.slowlog.user\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"User\"}}],\"listeners\":{}}", + "description": "", + "title": "MySQL slowest queries", + "uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}", + "version": 1, + "savedSearchId": "Filebeat-MySQL-Slow-log", + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"filter\":[]}" + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/_meta/fields.yml b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/_meta/fields.yml new file mode 100644 index 00000000..c878689f --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/_meta/fields.yml @@ -0,0 +1,21 @@ +- name: error + type: group + description: > + Contains fields from the MySQL error logs. + fields: + - name: timestamp + description: > + The timestamp from the log line. + - name: thread_id + type: long + description: > + As of MySQL 5.7.2, this is the thread id. For MySQL versions prior to 5.7.2, this + field contains the process id. + - name: level + example: "Warning" + description: + The log level. + - name: message + type: text + description: > + The logged message. diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/config/error.yml b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/config/error.yml new file mode 100644 index 00000000..89991b16 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/config/error.yml @@ -0,0 +1,9 @@ +- input_type: log + paths: + {%- for path in paths %} + - {{path}} + {%- endfor %} + exclude_files: [".gz$"] + fields: + source_type: mysql-error + pipeline_id: {{beat.pipeline_id}} diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/ingest/pipeline.json b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/ingest/pipeline.json new file mode 100644 index 00000000..201cb8fd --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/ingest/pipeline.json @@ -0,0 +1,50 @@ +{ + "description": "Pipeline for parsing MySQL error logs. 
It currently uses message1 and message2 to workaround bug https://github.com/elastic/elasticsearch/issues/22117", + "processors": [{ + "grok": { + "field": "message", + "patterns": [ + "%{LOCALDATETIME:mysql.error.timestamp} (\\[%{DATA:mysql.error.level}\\] )?%{GREEDYDATA:mysql.error.message}", + "%{DATA:mysql.error.timestamp} %{NUMBER:mysql.error.thread_id} \\[%{DATA:mysql.error.level}\\] %{GREEDYDATA:mysql.error.message1}", + "%{GREEDYDATA:mysql.error.message2}" + ], + "ignore_missing": true, + "pattern_definitions": { + "LOCALDATETIME": "[0-9]+ %{TIME}", + "GREEDYDATA1": ".*" + } + } + }, { + "remove": { + "field": "message" + } + }, { + "rename": { + "field": "mysql.error.message1", + "target_field": "mysql.error.message", + "ignore_failure": true + } + }, { + "rename": { + "field": "mysql.error.message2", + "target_field": "mysql.error.message", + "ignore_failure": true + } + }, { + "date": { + "field": "mysql.error.timestamp", + "target_field": "@timestamp", + "formats": [ + "ISO8601", + "YYMMdd H:m:s" + ], + "ignore_failure": true + } + }], + "on_failure" : [{ + "set" : { + "field" : "error", + "value" : "{{ _ingest.on_failure_message }}" + } + }] +} diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/manifest.yml new file mode 100644 index 00000000..3cf5ca4f --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/manifest.yml @@ -0,0 +1,15 @@ +module_version: 1.0 + +vars: + paths: + default: + - /var/log/mysql/error.log* + - /var/log/mysqld.log* + os.darwin: + - /usr/local/var/mysql/{{builtin.hostname}}.{{builtin.domain}}.err* + os.windows: + - "c:/programdata/MySQL/MySQL Server*/error.log*" + +ingest_pipeline: ingest/pipeline.json +prospectors: + - config/error.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-darwin-brew-5.7.10.log b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-darwin-brew-5.7.10.log new file mode 100644 index 00000000..5d9a6d01 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-darwin-brew-5.7.10.log @@ -0,0 +1,187 @@ +161209 13:08:33 mysqld_safe Starting mysqld daemon with databases from /usr/local/var/mysql +2016-12-09T12:08:33.335060Z 0 [Warning] TIMESTAMP with implicit DEFAULT value is deprecated. Please use --explicit_defaults_for_timestamp server option (see documentation for more details). +2016-12-09T12:08:33.335892Z 0 [Warning] Insecure configuration for --secure-file-priv: Current value does not restrict location of generated files. Consider setting it to a valid, non-empty path. +2016-12-09T12:08:33.336610Z 0 [Note] /usr/local/Cellar/mysql/5.7.10/bin/mysqld (mysqld 5.7.10) starting as process 61571 ... 
+2016-12-09T12:08:33.345527Z 0 [Warning] Setting lower_case_table_names=2 because file system for /usr/local/var/mysql/ is case insensitive +2016-12-09T12:08:33.351596Z 0 [Note] InnoDB: Mutexes and rw_locks use GCC atomic builtins +2016-12-09T12:08:33.351632Z 0 [Note] InnoDB: Uses event mutexes +2016-12-09T12:08:33.351638Z 0 [Note] InnoDB: GCC builtin __atomic_thread_fence() is used for memory barrier +2016-12-09T12:08:33.351641Z 0 [Note] InnoDB: Compressed tables use zlib 1.2.3 +2016-12-09T12:08:33.352714Z 0 [Note] InnoDB: Number of pools: 1 +2016-12-09T12:08:33.354153Z 0 [Note] InnoDB: Using CPU crc32 instructions +2016-12-09T12:08:33.366818Z 0 [Note] InnoDB: Initializing buffer pool, total size = 128M, instances = 1, chunk size = 128M +2016-12-09T12:08:33.379566Z 0 [Note] InnoDB: Completed initialization of buffer pool +2016-12-09T12:08:33.401031Z 0 [Note] InnoDB: Highest supported file format is Barracuda. +2016-12-09T12:08:33.402267Z 0 [Note] InnoDB: Log scan progressed past the checkpoint lsn 2498863 +2016-12-09T12:08:33.402289Z 0 [Note] InnoDB: Doing recovery: scanned up to log sequence number 2498872 +2016-12-09T12:08:33.402433Z 0 [Note] InnoDB: Doing recovery: scanned up to log sequence number 2498872 +2016-12-09T12:08:33.402440Z 0 [Note] InnoDB: Database was not shutdown normally! +2016-12-09T12:08:33.402443Z 0 [Note] InnoDB: Starting crash recovery. +2016-12-09T12:08:33.549180Z 0 [Note] InnoDB: Removed temporary tablespace data file: "ibtmp1" +2016-12-09T12:08:33.549206Z 0 [Note] InnoDB: Creating shared tablespace for temporary tables +2016-12-09T12:08:33.549317Z 0 [Note] InnoDB: Setting file './ibtmp1' size to 12 MB. Physically writing the file full; Please wait ... +2016-12-09T12:08:33.585175Z 0 [Note] InnoDB: File './ibtmp1' size is now 12 MB. +2016-12-09T12:08:33.588126Z 0 [Note] InnoDB: 96 redo rollback segment(s) found. 96 redo rollback segment(s) are active. +2016-12-09T12:08:33.588151Z 0 [Note] InnoDB: 32 non-redo rollback segment(s) are active. +2016-12-09T12:08:33.588453Z 0 [Note] InnoDB: Waiting for purge to start +2016-12-09T12:08:33.641851Z 0 [Note] InnoDB: 5.7.10 started; log sequence number 2498872 +2016-12-09T12:08:33.642289Z 0 [Note] InnoDB: Loading buffer pool(s) from /usr/local/var/mysql/ib_buffer_pool +2016-12-09T12:08:33.642503Z 0 [Note] InnoDB: not started +2016-12-09T12:08:33.643066Z 0 [Note] Plugin 'FEDERATED' is disabled. +2016-12-09T12:08:33.652949Z 0 [Note] InnoDB: Buffer pool(s) load completed at 161209 13:08:33 +2016-12-09T12:08:33.662950Z 0 [Note] Found ca.pem, server-cert.pem and server-key.pem in data directory. Trying to enable SSL support using them. +2016-12-09T12:08:33.662974Z 0 [Note] Skipping generation of SSL certificates as certificate files are present in data directory. +2016-12-09T12:08:33.665405Z 0 [Warning] CA certificate ca.pem is self signed. +2016-12-09T12:08:33.665842Z 0 [Note] Skipping generation of RSA key pair as key files are present in data directory. +2016-12-09T12:08:33.698843Z 0 [Note] Server hostname (bind-address): '*'; port: 3306 +2016-12-09T12:08:33.699354Z 0 [Note] IPv6 is available. +2016-12-09T12:08:33.699553Z 0 [Note] - '::' resolves to '::'; +2016-12-09T12:08:33.699665Z 0 [Note] Server socket created on IP: '::'. +2016-12-09T12:08:33.784625Z 0 [Note] Event Scheduler: Loaded 0 events +2016-12-09T12:08:33.784722Z 0 [Note] /usr/local/Cellar/mysql/5.7.10/bin/mysqld: ready for connections. 
+Version: '5.7.10' socket: '/tmp/mysql.sock' port: 3306 Homebrew +2016-12-09T22:21:02.443689Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 772568ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-09T22:36:49.017929Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 898642ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-09T23:37:34.021038Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3596603ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T00:17:54.198501Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 2371678ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T01:18:38.017222Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3597590ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T01:39:00.017683Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 1173583ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T02:39:45.021071Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3597610ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T02:49:08.015573Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 515469ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T03:24:15.016664Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 2059611ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T04:25:00.016866Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3597614ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T04:34:24.021797Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 515589ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T04:39:18.022366Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 246613ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T05:40:03.016821Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3598614ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T06:40:48.025959Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3595608ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T06:45:55.018094Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 258594ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T07:46:40.016090Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3598632ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T07:56:04.016254Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 515603ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T08:56:49.390467Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3597607ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T09:06:11.019025Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 515633ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T10:06:56.015782Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3597617ms. The settings might not be optimal. 
(flushed=0 and evicted=0, during the time.) +2016-12-10T10:16:18.022349Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 514638ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T11:17:02.165133Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3595614ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T11:30:44.018136Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 773594ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T12:03:24.017458Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 1912617ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T12:06:40.015089Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 150375ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T12:24:37.025219Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 1030636ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T13:25:22.017729Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3596603ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T13:39:05.016348Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 774598ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T14:39:50.178488Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3597787ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T14:49:14.023629Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 515462ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T15:49:59.022762Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3597628ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T15:59:23.014556Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 515609ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T17:00:08.019274Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3598607ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T17:09:30.026900Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 515633ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T17:48:20.017991Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 2282610ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T18:00:05.183218Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 515227ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T18:54:13.016955Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3200608ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T20:13:03.016738Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3089523ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T20:50:11.201413Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 2180623ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T20:53:54.016961Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 176629ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) 
+2016-12-10T21:03:18.023529Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 516622ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T22:04:03.021479Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3598602ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T22:13:57.015306Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 545611ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T22:49:59.020252Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 2114631ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-10T23:12:12.023076Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 1287614ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T00:12:57.015297Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3595581ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T00:26:41.053666Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 773622ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T00:47:44.015939Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 1215572ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T00:49:50.017378Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 79642ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T01:20:40.031015Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 1803651ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T02:21:24.021184Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3595607ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T02:26:30.015089Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 257596ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T03:18:55.018844Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3097591ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T04:15:14.022467Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3331614ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T04:20:52.016318Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 289611ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T04:25:56.035073Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 257653ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T05:26:41.020131Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3598198ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T05:36:05.024444Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 515624ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T06:36:50.017205Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3598619ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T06:46:14.016702Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 515611ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) 
+2016-12-11T07:22:43.020901Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 2141603ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T07:26:22.018814Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 172601ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T07:35:45.031855Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 516617ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T08:36:30.015495Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3597620ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T08:45:54.017653Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 515622ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T09:46:39.018813Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3598618ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T09:56:03.014615Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 515592ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T10:56:48.042131Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3598651ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T11:06:12.025718Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 515588ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T12:06:56.018725Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3597614ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T12:16:19.014169Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 515625ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T13:17:04.016183Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3597594ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T13:26:28.023088Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 514629ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T14:27:13.164509Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3597613ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T14:36:38.015297Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 514596ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T15:01:44.026173Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 1460642ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T15:15:28.015652Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 773648ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T16:16:13.027289Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3597657ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T16:30:51.021667Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 813477ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T17:18:43.027126Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 2824646ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) 
+2016-12-11T21:47:54.189105Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 592456ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T22:10:17.210550Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 1309781ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T23:11:02.043893Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3596681ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-11T23:24:54.163597Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 782633ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-12T00:25:39.024180Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3597668ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-12T00:35:03.029112Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 514635ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-12T01:07:31.173633Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 1900621ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-12T01:10:46.030187Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 147678ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-12T01:28:43.027567Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 1029630ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-12T02:29:28.163850Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3595664ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-12T03:17:29.198796Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 2831638ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-12T03:27:41.042627Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 564697ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-12T03:29:27.172837Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 58662ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-12T04:30:12.026414Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3595651ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-12T04:35:19.040164Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 257645ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-12T05:24:10.022697Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 2883673ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-12T05:43:00.028615Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 1060590ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-12T06:43:49.185223Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3596652ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-12T06:48:27.033604Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 228658ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +2016-12-12T06:55:03.184509Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 257407ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) 
+2016-12-12T07:55:49.022710Z 0 [Note] InnoDB: page_cleaner: 1000ms intended loop took 3598669ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.) +161212 12:40:38 mysqld_safe Starting mysqld daemon with databases from /usr/local/var/mysql +2016-12-12T11:40:39.000639Z 0 [Warning] TIMESTAMP with implicit DEFAULT value is deprecated. Please use --explicit_defaults_for_timestamp server option (see documentation for more details). +2016-12-12T11:40:39.001684Z 0 [Warning] 'NO_ZERO_DATE', 'NO_ZERO_IN_DATE' and 'ERROR_FOR_DIVISION_BY_ZERO' sql modes should be used with strict mode. They will be merged with strict mode in a future release. +2016-12-12T11:40:39.001726Z 0 [Warning] 'NO_AUTO_CREATE_USER' sql mode was not set. +2016-12-12T11:40:39.001942Z 0 [Warning] Insecure configuration for --secure-file-priv: Current value does not restrict location of generated files. Consider setting it to a valid, non-empty path. +2016-12-12T11:40:39.003193Z 0 [Note] /usr/local/Cellar/mysql/5.7.10/bin/mysqld (mysqld 5.7.10) starting as process 97910 ... +2016-12-12T11:40:39.012775Z 0 [Warning] Setting lower_case_table_names=2 because file system for /usr/local/var/mysql/ is case insensitive +2016-12-12T11:40:39.020465Z 0 [Note] InnoDB: Mutexes and rw_locks use GCC atomic builtins +2016-12-12T11:40:39.020530Z 0 [Note] InnoDB: Uses event mutexes +2016-12-12T11:40:39.020540Z 0 [Note] InnoDB: GCC builtin __atomic_thread_fence() is used for memory barrier +2016-12-12T11:40:39.020545Z 0 [Note] InnoDB: Compressed tables use zlib 1.2.3 +2016-12-12T11:40:39.021852Z 0 [Note] InnoDB: Number of pools: 1 +2016-12-12T11:40:39.023939Z 0 [Note] InnoDB: Using CPU crc32 instructions +2016-12-12T11:40:39.037980Z 0 [Note] InnoDB: Initializing buffer pool, total size = 128M, instances = 1, chunk size = 128M +2016-12-12T11:40:39.055856Z 0 [Note] InnoDB: Completed initialization of buffer pool +2016-12-12T11:40:39.079839Z 0 [Note] InnoDB: Highest supported file format is Barracuda. +2016-12-12T11:40:39.082943Z 0 [Note] InnoDB: Log scan progressed past the checkpoint lsn 2498891 +2016-12-12T11:40:39.082997Z 0 [Note] InnoDB: Doing recovery: scanned up to log sequence number 2498900 +2016-12-12T11:40:39.083372Z 0 [Note] InnoDB: Doing recovery: scanned up to log sequence number 2498900 +2016-12-12T11:40:39.083404Z 0 [Note] InnoDB: Database was not shutdown normally! +2016-12-12T11:40:39.083412Z 0 [Note] InnoDB: Starting crash recovery. +2016-12-12T11:40:39.243369Z 0 [Note] InnoDB: Removed temporary tablespace data file: "ibtmp1" +2016-12-12T11:40:39.243411Z 0 [Note] InnoDB: Creating shared tablespace for temporary tables +2016-12-12T11:40:39.243762Z 0 [Note] InnoDB: Setting file './ibtmp1' size to 12 MB. Physically writing the file full; Please wait ... +2016-12-12T11:40:39.262976Z 0 [Note] InnoDB: File './ibtmp1' size is now 12 MB. +2016-12-12T11:40:39.264392Z 0 [Note] InnoDB: 96 redo rollback segment(s) found. 96 redo rollback segment(s) are active. +2016-12-12T11:40:39.264418Z 0 [Note] InnoDB: 32 non-redo rollback segment(s) are active. +2016-12-12T11:40:39.264631Z 0 [Note] InnoDB: Waiting for purge to start +2016-12-12T11:40:39.318411Z 0 [Note] InnoDB: 5.7.10 started; log sequence number 2498900 +2016-12-12T11:40:39.319011Z 0 [Note] InnoDB: Loading buffer pool(s) from /usr/local/var/mysql/ib_buffer_pool +2016-12-12T11:40:39.319443Z 0 [Note] InnoDB: not started +2016-12-12T11:40:39.319804Z 0 [Note] Plugin 'FEDERATED' is disabled. 
+2016-12-12T11:40:39.330889Z 0 [Note] InnoDB: Buffer pool(s) load completed at 161212 12:40:39 +2016-12-12T11:40:39.343948Z 0 [Note] Found ca.pem, server-cert.pem and server-key.pem in data directory. Trying to enable SSL support using them. +2016-12-12T11:40:39.343980Z 0 [Note] Skipping generation of SSL certificates as certificate files are present in data directory. +2016-12-12T11:40:39.346921Z 0 [Warning] CA certificate ca.pem is self signed. +2016-12-12T11:40:39.347342Z 0 [Note] Skipping generation of RSA key pair as key files are present in data directory. +2016-12-12T11:40:39.362897Z 0 [Note] Server hostname (bind-address): '*'; port: 3306 +2016-12-12T11:40:39.363163Z 0 [Note] IPv6 is available. +2016-12-12T11:40:39.363202Z 0 [Note] - '::' resolves to '::'; +2016-12-12T11:40:39.363224Z 0 [Note] Server socket created on IP: '::'. +2016-12-12T11:40:39.540292Z 0 [Note] Event Scheduler: Loaded 0 events +2016-12-12T11:40:39.540498Z 0 [Note] /usr/local/Cellar/mysql/5.7.10/bin/mysqld: ready for connections. +Version: '5.7.10' socket: '/tmp/mysql.sock' port: 3306 Homebrew diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-ubuntu-5.5.53.log b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-ubuntu-5.5.53.log new file mode 100644 index 00000000..1e9ed334 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-ubuntu-5.5.53.log @@ -0,0 +1,223 @@ +161209 14:18:50 [Warning] Using unique option prefix myisam-recover instead of myisam-recover-options is deprecated and will be removed in a future release. Please use the full name instead. +161209 14:18:50 [Note] Plugin 'FEDERATED' is disabled. +161209 14:18:50 InnoDB: The InnoDB memory heap is disabled +161209 14:18:50 InnoDB: Mutexes and rw_locks use GCC atomic builtins +161209 14:18:50 InnoDB: Compressed tables use zlib 1.2.3.4 +161209 14:18:50 InnoDB: Initializing buffer pool, size = 128.0M +161209 14:18:50 InnoDB: Completed initialization of buffer pool +InnoDB: The first specified data file ./ibdata1 did not exist: +InnoDB: a new database to be created! +161209 14:18:50 InnoDB: Setting file ./ibdata1 size to 10 MB +InnoDB: Database physically writes the file full: wait... +161209 14:18:50 InnoDB: Log file ./ib_logfile0 did not exist: new to be created +InnoDB: Setting log file ./ib_logfile0 size to 5 MB +InnoDB: Database physically writes the file full: wait... +161209 14:18:50 InnoDB: Log file ./ib_logfile1 did not exist: new to be created +InnoDB: Setting log file ./ib_logfile1 size to 5 MB +InnoDB: Database physically writes the file full: wait... +InnoDB: Doublewrite buffer not found: creating new +InnoDB: Doublewrite buffer created +InnoDB: 127 rollback segment(s) active. +InnoDB: Creating foreign key constraint system tables +InnoDB: Foreign key constraint system tables created +161209 14:18:50 InnoDB: Waiting for the background threads to start +161209 14:18:51 InnoDB: 5.5.53 started; log sequence number 0 +161209 14:18:51 InnoDB: Starting shutdown... +161209 14:18:52 InnoDB: Shutdown completed; log sequence number 1595675 +161209 14:18:52 [Warning] Using unique option prefix myisam-recover instead of myisam-recover-options is deprecated and will be removed in a future release. Please use the full name instead. +161209 14:18:52 [Note] Plugin 'FEDERATED' is disabled. 
+161209 14:18:52 InnoDB: The InnoDB memory heap is disabled +161209 14:18:52 InnoDB: Mutexes and rw_locks use GCC atomic builtins +161209 14:18:52 InnoDB: Compressed tables use zlib 1.2.3.4 +161209 14:18:52 InnoDB: Initializing buffer pool, size = 128.0M +161209 14:18:52 InnoDB: Completed initialization of buffer pool +161209 14:18:52 InnoDB: highest supported file format is Barracuda. +161209 14:18:52 InnoDB: Waiting for the background threads to start +161209 14:18:53 InnoDB: 5.5.53 started; log sequence number 1595675 +ERROR: 1064 You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'ALTER TABLE user ADD column Show_view_priv enum('N','Y') CHARACTER SET utf8 NOT ' at line 1 +161209 14:18:53 [ERROR] Aborting + +161209 14:18:53 InnoDB: Starting shutdown... +161209 14:18:53 InnoDB: Shutdown completed; log sequence number 1595675 +161209 14:18:53 [Note] /usr/sbin/mysqld: Shutdown complete + +161209 14:18:53 [Warning] Using unique option prefix myisam-recover instead of myisam-recover-options is deprecated and will be removed in a future release. Please use the full name instead. +161209 14:18:53 [Note] Plugin 'FEDERATED' is disabled. +161209 14:18:53 InnoDB: The InnoDB memory heap is disabled +161209 14:18:53 InnoDB: Mutexes and rw_locks use GCC atomic builtins +161209 14:18:53 InnoDB: Compressed tables use zlib 1.2.3.4 +161209 14:18:53 InnoDB: Initializing buffer pool, size = 128.0M +161209 14:18:53 InnoDB: Completed initialization of buffer pool +161209 14:18:53 InnoDB: highest supported file format is Barracuda. +161209 14:18:53 InnoDB: Waiting for the background threads to start +161209 14:18:54 InnoDB: 5.5.53 started; log sequence number 1595675 +161209 14:18:54 InnoDB: Starting shutdown... +161209 14:18:56 [ERROR] Aborting + +161209 14:18:56 InnoDB: Starting shutdown... +161209 14:18:56 InnoDB: Shutdown completed; log sequence number 1595675 +161209 14:18:56 [Note] /usr/sbin/mysqld: Shutdown complete + +161209 14:18:56 [Warning] Using unique option prefix myisam-recover instead of myisam-recover-options is deprecated and will be removed in a future release. Please use the full name instead. +161209 14:18:56 [Note] Plugin 'FEDERATED' is disabled. +161209 14:18:56 InnoDB: The InnoDB memory heap is disabled +161209 14:18:56 InnoDB: Mutexes and rw_locks use GCC atomic builtins +161209 14:18:56 InnoDB: Compressed tables use zlib 1.2.3.4 +161209 14:18:56 InnoDB: Initializing buffer pool, size = 128.0M +161209 14:18:56 InnoDB: Completed initialization of buffer pool +161209 14:18:57 InnoDB: highest supported file format is Barracuda. +161209 14:18:57 InnoDB: Waiting for the background threads to start +161209 14:18:58 InnoDB: 5.5.53 started; log sequence number 1595675 +161209 14:18:58 [Note] Server hostname (bind-address): '127.0.0.1'; port: 3306 +161209 14:18:58 [Note] - '127.0.0.1' resolves to '127.0.0.1'; +161209 14:18:58 [Note] Server socket created on IP: '127.0.0.1'. +161209 14:18:58 [Note] Event Scheduler: Loaded 0 events +161209 14:18:58 [Note] /usr/sbin/mysqld: ready for connections. +Version: '5.5.53-0ubuntu0.12.04.1' socket: '/var/run/mysqld/mysqld.sock' port: 3306 (Ubuntu) +161209 14:37:57 [Note] /usr/sbin/mysqld: Normal shutdown + +161209 14:37:57 [Note] Event Scheduler: Purging the queue. 0 events +161209 14:37:57 InnoDB: Starting shutdown... 
+161209 14:37:57 InnoDB: Shutdown completed; log sequence number 1595685 +161209 14:37:57 [Note] /usr/sbin/mysqld: Shutdown complete + +161209 14:37:57 [Warning] Using unique option prefix myisam-recover instead of myisam-recover-options is deprecated and will be removed in a future release. Please use the full name instead. +161209 14:37:57 [Note] Plugin 'FEDERATED' is disabled. +161209 14:37:57 InnoDB: The InnoDB memory heap is disabled +161209 14:37:57 InnoDB: Mutexes and rw_locks use GCC atomic builtins +161209 14:37:57 InnoDB: Compressed tables use zlib 1.2.3.4 +161209 14:37:57 InnoDB: Initializing buffer pool, size = 128.0M +161209 14:37:57 InnoDB: Completed initialization of buffer pool +161209 14:37:57 InnoDB: highest supported file format is Barracuda. +161209 14:37:57 InnoDB: Waiting for the background threads to start +161209 14:37:58 InnoDB: 5.5.53 started; log sequence number 1595685 +161209 14:37:58 [Note] Server hostname (bind-address): '127.0.0.1'; port: 3306 +161209 14:37:58 [Note] - '127.0.0.1' resolves to '127.0.0.1'; +161209 14:37:58 [Note] Server socket created on IP: '127.0.0.1'. +161209 14:37:58 [Note] Event Scheduler: Loaded 0 events +161209 14:37:58 [Note] /usr/sbin/mysqld: ready for connections. +Version: '5.5.53-0ubuntu0.12.04.1-log' socket: '/var/run/mysqld/mysqld.sock' port: 3306 (Ubuntu) +vagrant@precise32:~$ cat /var/log/mysql.log | grep phisically +vagrant@precise32:~$ cat /var/log/mysql.log | grep physi +vagrant@precise32:~$ cat /var/log/mysql.log | physically +physically: command not found +vagrant@precise32:~$ cat /var/log/mysql.log | grep physically +vagrant@precise32:~$ less /var/log/mysql. +mysql.err mysql.log +vagrant@precise32:~$ less /var/log/mysql.err +vagrant@precise32:~$ less /var/log/mysql.log +vagrant@precise32:~$ less /var/log/mysql/ +error.log mysql-slow.log +vagrant@precise32:~$ less /var/log/mysql/error.log +vagrant@precise32:~$ cat /var/log/mysql/error.log +161209 14:18:50 [Warning] Using unique option prefix myisam-recover instead of myisam-recover-options is deprecated and will be removed in a future release. Please use the full name instead. +161209 14:18:50 [Note] Plugin 'FEDERATED' is disabled. +161209 14:18:50 InnoDB: The InnoDB memory heap is disabled +161209 14:18:50 InnoDB: Mutexes and rw_locks use GCC atomic builtins +161209 14:18:50 InnoDB: Compressed tables use zlib 1.2.3.4 +161209 14:18:50 InnoDB: Initializing buffer pool, size = 128.0M +161209 14:18:50 InnoDB: Completed initialization of buffer pool +InnoDB: The first specified data file ./ibdata1 did not exist: +InnoDB: a new database to be created! +161209 14:18:50 InnoDB: Setting file ./ibdata1 size to 10 MB +InnoDB: Database physically writes the file full: wait... +161209 14:18:50 InnoDB: Log file ./ib_logfile0 did not exist: new to be created +InnoDB: Setting log file ./ib_logfile0 size to 5 MB +InnoDB: Database physically writes the file full: wait... +161209 14:18:50 InnoDB: Log file ./ib_logfile1 did not exist: new to be created +InnoDB: Setting log file ./ib_logfile1 size to 5 MB +InnoDB: Database physically writes the file full: wait... +InnoDB: Doublewrite buffer not found: creating new +InnoDB: Doublewrite buffer created +InnoDB: 127 rollback segment(s) active. +InnoDB: Creating foreign key constraint system tables +InnoDB: Foreign key constraint system tables created +161209 14:18:50 InnoDB: Waiting for the background threads to start +161209 14:18:51 InnoDB: 5.5.53 started; log sequence number 0 +161209 14:18:51 InnoDB: Starting shutdown... 
+161209 14:18:52 InnoDB: Shutdown completed; log sequence number 1595675 +161209 14:18:52 [Warning] Using unique option prefix myisam-recover instead of myisam-recover-options is deprecated and will be removed in a future release. Please use the full name instead. +161209 14:18:52 [Note] Plugin 'FEDERATED' is disabled. +161209 14:18:52 InnoDB: The InnoDB memory heap is disabled +161209 14:18:52 InnoDB: Mutexes and rw_locks use GCC atomic builtins +161209 14:18:52 InnoDB: Compressed tables use zlib 1.2.3.4 +161209 14:18:52 InnoDB: Initializing buffer pool, size = 128.0M +161209 14:18:52 InnoDB: Completed initialization of buffer pool +161209 14:18:52 InnoDB: highest supported file format is Barracuda. +161209 14:18:52 InnoDB: Waiting for the background threads to start +161209 14:18:53 InnoDB: 5.5.53 started; log sequence number 1595675 +ERROR: 1064 You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'ALTER TABLE user ADD column Show_view_priv enum('N','Y') CHARACTER SET utf8 NOT ' at line 1 +161209 14:18:53 [ERROR] Aborting + +161209 14:18:53 InnoDB: Starting shutdown... +161209 14:18:53 InnoDB: Shutdown completed; log sequence number 1595675 +161209 14:18:53 [Note] /usr/sbin/mysqld: Shutdown complete + +161209 14:18:53 [Warning] Using unique option prefix myisam-recover instead of myisam-recover-options is deprecated and will be removed in a future release. Please use the full name instead. +161209 14:18:53 [Note] Plugin 'FEDERATED' is disabled. +161209 14:18:53 InnoDB: The InnoDB memory heap is disabled +161209 14:18:53 InnoDB: Mutexes and rw_locks use GCC atomic builtins +161209 14:18:53 InnoDB: Compressed tables use zlib 1.2.3.4 +161209 14:18:53 InnoDB: Initializing buffer pool, size = 128.0M +161209 14:18:53 InnoDB: Completed initialization of buffer pool +161209 14:18:53 InnoDB: highest supported file format is Barracuda. +161209 14:18:53 InnoDB: Waiting for the background threads to start +161209 14:18:54 InnoDB: 5.5.53 started; log sequence number 1595675 +161209 14:18:54 InnoDB: Starting shutdown... +161209 14:18:55 InnoDB: Shutdown completed; log sequence number 1595675 +161209 14:18:55 [Warning] Using unique option prefix myisam-recover instead of myisam-recover-options is deprecated and will be removed in a future release. Please use the full name instead. +161209 14:18:55 [Note] Plugin 'FEDERATED' is disabled. +161209 14:18:55 InnoDB: The InnoDB memory heap is disabled +161209 14:18:55 InnoDB: Mutexes and rw_locks use GCC atomic builtins +161209 14:18:55 InnoDB: Compressed tables use zlib 1.2.3.4 +161209 14:18:55 InnoDB: Initializing buffer pool, size = 128.0M +161209 14:18:55 InnoDB: Completed initialization of buffer pool +161209 14:18:55 InnoDB: highest supported file format is Barracuda. +161209 14:18:55 InnoDB: Waiting for the background threads to start +161209 14:18:56 InnoDB: 5.5.53 started; log sequence number 1595675 +ERROR: 1050 Table 'plugin' already exists +161209 14:18:56 [ERROR] Aborting + +161209 14:18:56 InnoDB: Starting shutdown... +161209 14:18:56 InnoDB: Shutdown completed; log sequence number 1595675 +161209 14:18:56 [Note] /usr/sbin/mysqld: Shutdown complete + +161209 14:18:56 [Warning] Using unique option prefix myisam-recover instead of myisam-recover-options is deprecated and will be removed in a future release. Please use the full name instead. +161209 14:18:56 [Note] Plugin 'FEDERATED' is disabled. 
+161209 14:18:56 InnoDB: The InnoDB memory heap is disabled +161209 14:18:56 InnoDB: Mutexes and rw_locks use GCC atomic builtins +161209 14:18:56 InnoDB: Compressed tables use zlib 1.2.3.4 +161209 14:18:56 InnoDB: Initializing buffer pool, size = 128.0M +161209 14:18:56 InnoDB: Completed initialization of buffer pool +161209 14:18:57 InnoDB: highest supported file format is Barracuda. +161209 14:18:57 InnoDB: Waiting for the background threads to start +161209 14:18:58 InnoDB: 5.5.53 started; log sequence number 1595675 +161209 14:18:58 [Note] Server hostname (bind-address): '127.0.0.1'; port: 3306 +161209 14:18:58 [Note] - '127.0.0.1' resolves to '127.0.0.1'; +161209 14:18:58 [Note] Server socket created on IP: '127.0.0.1'. +161209 14:18:58 [Note] Event Scheduler: Loaded 0 events +161209 14:18:58 [Note] /usr/sbin/mysqld: ready for connections. +Version: '5.5.53-0ubuntu0.12.04.1' socket: '/var/run/mysqld/mysqld.sock' port: 3306 (Ubuntu) +161209 14:37:57 [Note] /usr/sbin/mysqld: Normal shutdown + +161209 14:37:57 [Note] Event Scheduler: Purging the queue. 0 events +161209 14:37:57 InnoDB: Starting shutdown... +161209 14:37:57 InnoDB: Shutdown completed; log sequence number 1595685 +161209 14:37:57 [Note] /usr/sbin/mysqld: Shutdown complete + +161209 14:37:57 [Warning] Using unique option prefix myisam-recover instead of myisam-recover-options is deprecated and will be removed in a future release. Please use the full name instead. +161209 14:37:57 [Note] Plugin 'FEDERATED' is disabled. +161209 14:37:57 InnoDB: The InnoDB memory heap is disabled +161209 14:37:57 InnoDB: Mutexes and rw_locks use GCC atomic builtins +161209 14:37:57 InnoDB: Compressed tables use zlib 1.2.3.4 +161209 14:37:57 InnoDB: Initializing buffer pool, size = 128.0M +161209 14:37:57 InnoDB: Completed initialization of buffer pool +161209 14:37:57 InnoDB: highest supported file format is Barracuda. +161209 14:37:57 InnoDB: Waiting for the background threads to start +161209 14:37:58 InnoDB: 5.5.53 started; log sequence number 1595685 +161209 14:37:58 [Note] Server hostname (bind-address): '127.0.0.1'; port: 3306 +161209 14:37:58 [Note] - '127.0.0.1' resolves to '127.0.0.1'; +161209 14:37:58 [Note] Server socket created on IP: '127.0.0.1'. +161209 14:37:58 [Note] Event Scheduler: Loaded 0 events +161209 14:37:58 [Note] /usr/sbin/mysqld: ready for connections. +Version: '5.5.53-0ubuntu0.12.04.1-log' socket: '/var/run/mysqld/mysqld.sock' port: 3306 (Ubuntu) diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/_meta/fields.yml b/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/_meta/fields.yml new file mode 100644 index 00000000..c785d9bd --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/_meta/fields.yml @@ -0,0 +1,42 @@ +- name: slowlog + type: group + description: > + Contains fields from the MySQL slow logs. + fields: + - name: user + description: > + The MySQL user that created the query. + - name: host + description: > + The host from where the user that created the query logged in. + - name: ip + description: > + The IP address from where the user that created the query logged in. + - name: query_time.sec + type: float + description: > + The total time the query took, in seconds, as a floating point number. + - name: lock_time.sec + type: float + description: > + The amount of time the query waited for the lock to be available. The + value is in seconds, as a floating point number. 
+    - name: rows_sent
+      type: long
+      description: >
+        The number of rows returned by the query.
+    - name: rows_examined
+      type: long
+      description: >
+        The number of rows scanned by the query.
+    - name: timestamp
+      type: long
+      description: >
+        The unix timestamp taken from the `SET timestamp` query.
+    - name: query
+      description: >
+        The slow query.
+    - name: id
+      type: long
+      description: >
+        The connection ID for the query.
diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/config/slowlog.yml b/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/config/slowlog.yml
new file mode 100644
index 00000000..a686c79d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/config/slowlog.yml
@@ -0,0 +1,13 @@
+- input_type: log
+  paths:
+  {%- for path in paths %}
+    - {{path}}
+  {%- endfor %}
+  exclude_files: [".gz$"]
+  multiline:
+    pattern: "^# User@Host: "
+    negate: true
+    match: after
+  fields:
+    source_type: mysql-slowlog
+    pipeline_id: {{beat.pipeline_id}}
diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/ingest/pipeline.json b/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/ingest/pipeline.json
new file mode 100644
index 00000000..f943ec68
--- /dev/null
+++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/ingest/pipeline.json
@@ -0,0 +1,39 @@
+{
+  "description": "Pipeline for parsing MySQL slow logs.",
+  "processors": [{
+    "grok": {
+      "field": "message",
+      "patterns":[
+        "^# User@Host: %{USER:mysql.slowlog.user}(\\[[^\\]]+\\])? @ %{HOSTNAME:mysql.slowlog.host} \\[(%{IP:mysql.slowlog.ip})?\\](\\s*Id:\\s* %{NUMBER:mysql.slowlog.id})?\n# Query_time: %{NUMBER:mysql.slowlog.query_time.sec}\\s* Lock_time: %{NUMBER:mysql.slowlog.lock_time.sec}\\s* Rows_sent: %{NUMBER:mysql.slowlog.rows_sent}\\s* Rows_examined: %{NUMBER:mysql.slowlog.rows_examined}\n(SET timestamp=%{NUMBER:mysql.slowlog.timestamp};\n)?%{GREEDYMULTILINE:mysql.slowlog.query}"
+      ],
+      "pattern_definitions" : {
+        "GREEDYMULTILINE" : "(.|\n)*"
+      },
+      "ignore_missing": true
+    }
+  }, {
+    "remove":{
+      "field": "message"
+    }
+  }, {
+    "date": {
+      "field": "mysql.slowlog.timestamp",
+      "target_field": "@timestamp",
+      "formats": ["UNIX"],
+      "ignore_failure": true
+    }
+  }, {
+    "gsub": {
+      "field": "mysql.slowlog.query",
+      "pattern": "\n# Time: [0-9]+ [0-9][0-9]:[0-9][0-9]:[0-9][0-9](\\.[0-9]+)?$",
+      "replacement": "",
+      "ignore_failure": true
+    }
+  }],
+  "on_failure" : [{
+    "set" : {
+      "field" : "error",
+      "value" : "{{ _ingest.on_failure_message }}"
+    }
+  }]
+}
diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/manifest.yml
new file mode 100644
index 00000000..309b167f
--- /dev/null
+++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/manifest.yml
@@ -0,0 +1,15 @@
+module_version: 1.0
+
+vars:
+  paths:
+    default:
+      - /var/log/mysql/mysql-slow.log*
+      - /var/lib/mysql/{{builtin.hostname}}-slow.log
+    os.darwin:
+      - /usr/local/var/mysql/{{builtin.hostname}}-slow.log*
+    os.windows:
+      - "c:/programdata/MySQL/MySQL Server*/mysql-slow.log*"
+
+ingest_pipeline: ingest/pipeline.json
+prospectors:
+  - config/slowlog.yml
diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/test/mysql-darwin-brew-5.7.10.log b/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/test/mysql-darwin-brew-5.7.10.log
new file mode 100644
index 00000000..419db21f
--- /dev/null
+++ 
b/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/test/mysql-darwin-brew-5.7.10.log @@ -0,0 +1,8 @@ +/usr/local/Cellar/mysql/5.7.10/bin/mysqld, Version: 5.7.10 (Homebrew). started with: +Tcp port: 3306 Unix socket: /tmp/mysql.sock +Time Id Command Argument +# Time: 2016-12-12T11:54:16.601172Z +# User@Host: root[root] @ localhost [] Id: 2 +# Query_time: 11.004467 Lock_time: 0.000000 Rows_sent: 1 Rows_examined: 0 +SET timestamp=1481543656; +select sleep(11); diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/test/mysql-ubuntu-5.5.53.log b/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/test/mysql-ubuntu-5.5.53.log new file mode 100644 index 00000000..d33bbfd3 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/test/mysql-ubuntu-5.5.53.log @@ -0,0 +1,66 @@ +/usr/sbin/mysqld, Version: 5.5.53-0ubuntu0.12.04.1-log ((Ubuntu)). started with: +Tcp port: 3306 Unix socket: /var/run/mysqld/mysqld.sock +Time Id Command Argument +# Time: 161209 14:37:59 +# User@Host: debian-sys-maint[debian-sys-maint] @ localhost [] +# Query_time: 0.000153 Lock_time: 0.000061 Rows_sent: 1 Rows_examined: 5 +SET timestamp=1481294279; +SELECT count(*) FROM mysql.user WHERE user='root' and password=''; +# User@Host: debian-sys-maint[debian-sys-maint] @ localhost [] +# Query_time: 0.002456 Lock_time: 0.000095 Rows_sent: 31 Rows_examined: 81 +SET timestamp=1481294279; +select concat('select count(*) into @discard from `', + TABLE_SCHEMA, '`.`', TABLE_NAME, '`') + from information_schema.TABLES where ENGINE='MyISAM'; +# User@Host: debian-sys-maint[debian-sys-maint] @ localhost [] +# Query_time: 0.006278 Lock_time: 0.000153 Rows_sent: 0 Rows_examined: 808 +SET timestamp=1481294279; +select count(*) into @discard from `information_schema`.`COLUMNS`; +# User@Host: debian-sys-maint[debian-sys-maint] @ localhost [] +# Query_time: 0.000262 Lock_time: 0.000204 Rows_sent: 0 Rows_examined: 0 +SET timestamp=1481294279; +select count(*) into @discard from `information_schema`.`EVENTS`; +# User@Host: debian-sys-maint[debian-sys-maint] @ localhost [] +# Query_time: 0.000323 Lock_time: 0.000241 Rows_sent: 0 Rows_examined: 0 +SET timestamp=1481294279; +select count(*) into @discard from `information_schema`.`PARAMETERS`; +# User@Host: debian-sys-maint[debian-sys-maint] @ localhost [] +# Query_time: 0.007084 Lock_time: 0.000148 Rows_sent: 0 Rows_examined: 81 +SET timestamp=1481294279; +select count(*) into @discard from `information_schema`.`PARTITIONS`; +# User@Host: debian-sys-maint[debian-sys-maint] @ localhost [] +# Query_time: 0.000277 Lock_time: 0.000135 Rows_sent: 0 Rows_examined: 23 +SET timestamp=1481294279; +select count(*) into @discard from `information_schema`.`PLUGINS`; +# User@Host: debian-sys-maint[debian-sys-maint] @ localhost [] +# Query_time: 0.000254 Lock_time: 0.000159 Rows_sent: 0 Rows_examined: 1 +SET timestamp=1481294279; +select count(*) into @discard from `information_schema`.`PROCESSLIST`; +# User@Host: debian-sys-maint[debian-sys-maint] @ localhost [] +# Query_time: 0.000297 Lock_time: 0.000229 Rows_sent: 0 Rows_examined: 0 +SET timestamp=1481294279; +select count(*) into @discard from `information_schema`.`ROUTINES`; +# User@Host: debian-sys-maint[debian-sys-maint] @ localhost [] +# Query_time: 0.001676 Lock_time: 0.000156 Rows_sent: 0 Rows_examined: 0 +SET timestamp=1481294279; +select count(*) into @discard from `information_schema`.`TRIGGERS`; +# User@Host: debian-sys-maint[debian-sys-maint] @ localhost [] +# Query_time: 0.008782 
Lock_time: 0.001187 Rows_sent: 0 Rows_examined: 0 +SET timestamp=1481294279; +select count(*) into @discard from `information_schema`.`VIEWS`; +# Time: 161209 14:39:02 +# User@Host: root[root] @ localhost [] +# Query_time: 2.000268 Lock_time: 0.000000 Rows_sent: 1 Rows_examined: 0 +SET timestamp=1481294342; +select sleep(2); +# Time: 161209 14:39:23 +# User@Host: root[root] @ localhost [] +# Query_time: 0.000138 Lock_time: 0.000056 Rows_sent: 0 Rows_examined: 0 +use mysql; +SET timestamp=1481294363; +select * from general_log; +# Time: 161209 14:39:40 +# User@Host: root[root] @ localhost [] +# Query_time: 0.000159 Lock_time: 0.000059 Rows_sent: 5 Rows_examined: 5 +SET timestamp=1481294380; +select * from user; diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/fields.yml b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/fields.yml new file mode 100644 index 00000000..8f9cef37 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/fields.yml @@ -0,0 +1,10 @@ +- key: nginx + title: "Nginx" + description: > + Module for parsing the Nginx log files. + fields: + - name: nginx + type: group + description: > + Fields from the Nginx log files. + fields: diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/dashboard/Filebeat-Nginx-Dashboard.json b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/dashboard/Filebeat-Nginx-Dashboard.json new file mode 100644 index 00000000..ef994978 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/dashboard/Filebeat-Nginx-Dashboard.json @@ -0,0 +1,13 @@ +{ + "hits": 0, + "timeRestore": false, + "description": "", + "title": "Filebeat Nginx Dashboard", + "uiStateJSON": "{\"P-4\":{\"vis\":{\"legendOpen\":true}},\"P-8\":{\"mapCenter\":[50.51342652633956,-0.17578125]}}", + "panelsJSON": "[{\"col\":9,\"id\":\"Errors-over-time\",\"panelIndex\":2,\"row\":4,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Nginx-Access-Browsers\",\"panelIndex\":3,\"row\":10,\"size_x\":4,\"size_y\":4,\"type\":\"visualization\"},{\"col\":5,\"id\":\"Nginx-Access-OSes\",\"panelIndex\":4,\"row\":10,\"size_x\":4,\"size_y\":4,\"type\":\"visualization\"},{\"col\":1,\"id\":\"New-Visualization\",\"panelIndex\":5,\"row\":4,\"size_x\":8,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Nginx-Access-Response-codes-by-top-URLs\",\"panelIndex\":6,\"row\":7,\"size_x\":12,\"size_y\":3,\"type\":\"visualization\"},{\"col\":9,\"id\":\"Sent-sizes\",\"panelIndex\":7,\"row\":10,\"size_x\":4,\"size_y\":4,\"type\":\"visualization\"},{\"id\":\"Nginx-Access-Map\",\"type\":\"visualization\",\"panelIndex\":8,\"size_x\":12,\"size_y\":3,\"col\":1,\"row\":1}]", + "optionsJSON": "{\"darkTheme\":false}", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}" + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/search/Filebeat-Nginx-module.json b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/search/Filebeat-Nginx-module.json new file mode 100644 index 00000000..5b1e4518 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/search/Filebeat-Nginx-module.json @@ -0,0 +1,16 @@ +{ + "sort": [ + "@timestamp", + "desc" + ], + "hits": 0, + "description": "", + "title": "Filebeat Nginx module", + "version": 1, + "kibanaSavedObjectMeta": { + 
"searchSourceJSON": "{\"index\":\"filebeat-*\",\"query\":{\"query_string\":{\"query\":\"_exists_:nginx\",\"analyze_wildcard\":true}},\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647}}" + }, + "columns": [ + "_source" + ] +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/Errors-over-time.json b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/Errors-over-time.json new file mode 100644 index 00000000..bd100926 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/Errors-over-time.json @@ -0,0 +1,10 @@ +{ + "visState": "{\n \"title\": \"Errors over time\",\n \"type\": \"area\",\n \"params\": {\n \"shareYAxis\": true,\n \"addTooltip\": true,\n \"addLegend\": true,\n \"legendPosition\": \"right\",\n \"smoothLines\": false,\n \"scale\": \"linear\",\n \"interpolate\": \"linear\",\n \"mode\": \"stacked\",\n \"times\": [],\n \"addTimeMarker\": false,\n \"defaultYExtents\": false,\n \"setYExtents\": false,\n \"yAxis\": {}\n },\n \"aggs\": [\n {\n \"id\": \"1\",\n \"enabled\": true,\n \"type\": \"count\",\n \"schema\": \"metric\",\n \"params\": {}\n },\n {\n \"id\": \"2\",\n \"enabled\": true,\n \"type\": \"date_histogram\",\n \"schema\": \"segment\",\n \"params\": {\n \"field\": \"@timestamp\",\n \"interval\": \"auto\",\n \"customInterval\": \"2h\",\n \"min_doc_count\": 1,\n \"extended_bounds\": {}\n }\n },\n {\n \"id\": \"3\",\n \"enabled\": true,\n \"type\": \"terms\",\n \"schema\": \"group\",\n \"params\": {\n \"field\": \"nginx.error.level\",\n \"size\": 5,\n \"order\": \"desc\",\n \"orderBy\": \"1\"\n }\n }\n ],\n \"listeners\": {}\n}", + "description": "", + "title": "Nginx Errors over time", + "uiStateJSON": "{}", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\n \"index\": \"filebeat-*\",\n \"query\": {\n \"query_string\": {\n \"query\": \"*\",\n \"analyze_wildcard\": true\n }\n },\n \"filter\": []\n}" + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/New-Visualization.json b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/New-Visualization.json new file mode 100644 index 00000000..55499a62 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/New-Visualization.json @@ -0,0 +1,11 @@ +{ + "visState": "{\n \"title\": \"New Visualization\",\n \"type\": \"histogram\",\n \"params\": {\n \"shareYAxis\": true,\n \"addTooltip\": true,\n \"addLegend\": true,\n \"legendPosition\": \"right\",\n \"scale\": \"linear\",\n \"mode\": \"stacked\",\n \"times\": [],\n \"addTimeMarker\": false,\n \"defaultYExtents\": false,\n \"setYExtents\": false,\n \"yAxis\": {}\n },\n \"aggs\": [\n {\n \"id\": \"1\",\n \"enabled\": true,\n \"type\": \"count\",\n \"schema\": \"metric\",\n \"params\": {}\n },\n {\n \"id\": \"2\",\n \"enabled\": true,\n \"type\": \"date_histogram\",\n \"schema\": \"segment\",\n \"params\": {\n \"field\": \"@timestamp\",\n \"interval\": \"auto\",\n \"customInterval\": \"2h\",\n \"min_doc_count\": 1,\n \"extended_bounds\": {}\n }\n },\n {\n \"id\": \"3\",\n \"enabled\": true,\n \"type\": \"terms\",\n \"schema\": \"group\",\n \"params\": {\n \"field\": \"nginx.access.response_code\",\n \"size\": 5,\n \"order\": 
\"desc\",\n \"orderBy\": \"1\"\n }\n }\n ],\n \"listeners\": {}\n}", + "description": "", + "title": "Nginx Access over time", + "uiStateJSON": "{\n \"vis\": {\n \"colors\": {\n \"200\": \"#7EB26D\",\n \"404\": \"#614D93\"\n }\n }\n}", + "version": 1, + "savedSearchId": "Filebeat-Nginx-module", + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\n \"filter\": []\n}" + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/Nginx-Access-Browsers.json b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/Nginx-Access-Browsers.json new file mode 100644 index 00000000..85e76ddd --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/Nginx-Access-Browsers.json @@ -0,0 +1,10 @@ +{ + "visState": "{\"title\":\"Nginx Access Browsers\",\"type\":\"pie\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"bottom\",\"isDonut\":true},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"nginx.access.user_agent.name\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"nginx.access.user_agent.major\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", + "description": "", + "title": "Nginx Access Browsers", + "uiStateJSON": "{}", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"index\":\"filebeat-*\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}" + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/Nginx-Access-Map.json b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/Nginx-Access-Map.json new file mode 100644 index 00000000..c4799c20 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/Nginx-Access-Map.json @@ -0,0 +1,11 @@ +{ + "visState": "{\"aggs\":[{\"enabled\":true,\"id\":\"1\",\"params\":{},\"schema\":\"metric\",\"type\":\"count\"},{\"enabled\":true,\"id\":\"2\",\"params\":{\"autoPrecision\":true,\"field\":\"nginx.access.geoip.location\"},\"schema\":\"segment\",\"type\":\"geohash_grid\"}],\"listeners\":{},\"params\":{\"addTooltip\":true,\"heatBlur\":15,\"heatMaxZoom\":16,\"heatMinOpacity\":0.1,\"heatNormalizeData\":true,\"heatRadius\":25,\"isDesaturated\":true,\"legendPosition\":\"bottomright\",\"mapCenter\":[15,5],\"mapType\":\"Scaled Circle Markers\",\"mapZoom\":2,\"wms\":{\"enabled\":false,\"options\":{\"attribution\":\"Maps provided by USGS\",\"format\":\"image/png\",\"layers\":\"0\",\"styles\":\"\",\"transparent\":true,\"version\":\"1.3.0\"},\"url\":\"https://basemap.nationalmap.gov/arcgis/services/USGSTopo/MapServer/WMSServer\"}},\"title\":\"Nginx Access Map\",\"type\":\"tile_map\"}", + "description": "", + "title": "Nginx Access Map", + "uiStateJSON": "{\"mapCenter\":[12.039320557540572,-0.17578125]}", + "version": 1, + "savedSearchId": "Filebeat-Nginx-module", + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"filter\":[]}" + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/Nginx-Access-OSes.json 
b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/Nginx-Access-OSes.json new file mode 100644 index 00000000..e1350329 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/Nginx-Access-OSes.json @@ -0,0 +1,10 @@ +{ + "visState": "{\"title\":\"Nginx Access OSes\",\"type\":\"pie\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"bottom\",\"isDonut\":true},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"nginx.access.user_agent.os_name\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"nginx.access.user_agent.os_major\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", + "description": "", + "title": "Nginx Access OSes", + "uiStateJSON": "{}", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"index\":\"filebeat-*\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}" + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/Nginx-Access-Response-codes-by-top-URLs.json b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/Nginx-Access-Response-codes-by-top-URLs.json new file mode 100644 index 00000000..a4075c3e --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/Nginx-Access-Response-codes-by-top-URLs.json @@ -0,0 +1,10 @@ +{ + "visState": "{\"title\":\"Nginx Access Response codes by top URLs\",\"type\":\"pie\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"split\",\"params\":{\"field\":\"nginx.access.url\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"row\":false}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"nginx.access.response_code\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", + "description": "", + "title": "Nginx Access Response codes by top URLs", + "uiStateJSON": "{\"vis\":{\"colors\":{\"200\":\"#629E51\",\"404\":\"#0A50A1\"}}}", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"index\":\"filebeat-*\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}" + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/Sent-sizes.json b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/Sent-sizes.json new file mode 100644 index 00000000..f18b7541 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/kibana/visualization/Sent-sizes.json @@ -0,0 +1,10 @@ +{ + "visState": "{\n \"title\": \"Sent sizes\",\n \"type\": \"line\",\n \"params\": {\n \"shareYAxis\": true,\n \"addTooltip\": true,\n \"addLegend\": true,\n \"legendPosition\": \"right\",\n \"showCircles\": true,\n \"smoothLines\": true,\n \"interpolate\": \"linear\",\n \"scale\": \"linear\",\n \"drawLinesBetweenPoints\": true,\n \"radiusRatio\": \"17\",\n \"times\": 
[],\n \"addTimeMarker\": false,\n \"defaultYExtents\": false,\n \"setYExtents\": false,\n \"yAxis\": {}\n },\n \"aggs\": [\n {\n \"id\": \"1\",\n \"enabled\": true,\n \"type\": \"sum\",\n \"schema\": \"metric\",\n \"params\": {\n \"field\": \"nginx.access.body_sent.bytes\",\n \"customLabel\": \"Data sent\"\n }\n },\n {\n \"id\": \"2\",\n \"enabled\": true,\n \"type\": \"date_histogram\",\n \"schema\": \"segment\",\n \"params\": {\n \"field\": \"@timestamp\",\n \"interval\": \"auto\",\n \"customInterval\": \"2h\",\n \"min_doc_count\": 1,\n \"extended_bounds\": {}\n }\n },\n {\n \"id\": \"3\",\n \"enabled\": true,\n \"type\": \"count\",\n \"schema\": \"radius\",\n \"params\": {}\n }\n ],\n \"listeners\": {}\n}", + "description": "", + "title": "Nginx Sent Byte Size", + "uiStateJSON": "{}", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\n \"filter\": [],\n \"index\": \"filebeat-*\",\n \"query\": {\n \"query_string\": {\n \"query\": \"_exists_:nginx.access\",\n \"analyze_wildcard\": true\n }\n },\n \"highlight\": {\n \"pre_tags\": [\n \"@kibana-highlighted-field@\"\n ],\n \"post_tags\": [\n \"@/kibana-highlighted-field@\"\n ],\n \"fields\": {\n \"*\": {}\n },\n \"require_field_match\": false,\n \"fragment_size\": 2147483647\n }\n}" + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/_meta/fields.yml b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/_meta/fields.yml new file mode 100644 index 00000000..ca8e9282 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/_meta/fields.yml @@ -0,0 +1,107 @@ +- name: access + type: group + description: > + Contains fields for the Nginx access logs. + fields: + - name: remote_ip + type: keyword + description: > + Client IP address. + - name: user_name + type: keyword + description: > + The user name used when basic authentication is used. + - name: method + type: keyword + example: GET + description: > + The request HTTP method. + - name: url + type: keyword + description: > + The request HTTP URL. + - name: http_version + type: keyword + description: > + The HTTP version. + - name: response_code + type: long + description: > + The HTTP response code. + - name: body_sent.bytes + type: long + format: bytes + description: > + The number of bytes of the server response body. + - name: referrer + type: keyword + description: > + The HTTP referrer. + - name: agent + type: text + description: > + Contains the un-parsed user agent string. Only present if the user + agent Elasticsearch plugin is not available or not used. + - name: user_agent + type: group + description: > + Contains the parsed User agent field. Only present if the user + agent Elasticsearch plugin is available and used. + fields: + - name: device + type: keyword + description: > + The name of the physical device. + - name: major + type: long + description: > + The major version of the user agent. + - name: minor + type: long + description: > + The minor version of the user agent. + - name: patch + type: long + description: > + The patch version of the user agent. + - name: name + type: keyword + example: Chrome + description: > + The name of the user agent. + - name: os + type: keyword + description: > + The name of the operating system. + - name: os_major + type: long + description: > + The major version of the operating system. + - name: os_minor + type: long + description: > + The minor version of the operating system. 
+        - name: os_name
+          type: keyword
+          description: >
+            The name of the operating system.
+    - name: geoip
+      type: group
+      description: >
+        Contains GeoIP information gathered based on the remote_ip field.
+        Only present if the GeoIP Elasticsearch plugin is available and
+        used.
+      fields:
+        - name: continent_name
+          type: keyword
+          description: >
+            The name of the continent.
+        - name: country_iso_code
+          type: keyword
+          description: >
+            Country ISO code.
+        - name: location
+          type: geo_point
+          description: >
+            The longitude and latitude.
+
diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/config/nginx-access.yml b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/config/nginx-access.yml
new file mode 100644
index 00000000..bbd45343
--- /dev/null
+++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/config/nginx-access.yml
@@ -0,0 +1,9 @@
+- input_type: log
+  paths:
+  {%- for path in paths %}
+    - {{path}}
+  {%- endfor %}
+  exclude_files: [".gz$"]
+  fields:
+    source_type: nginx-access
+    pipeline_id: {{beat.pipeline_id}}
diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/ingest/json_with_plugins.json b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/ingest/json_with_plugins.json
new file mode 100644
index 00000000..fb80aca3
--- /dev/null
+++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/ingest/json_with_plugins.json
@@ -0,0 +1,16 @@
+{
+  "description": "pipeline for parsing Nginx logs that are JSON format",
+  "processors": [{
+    "user_agent": {
+      "field": "agent"
+    }
+  }, {
+    "remove": {
+      "field": "agent"
+    }
+  }, {
+    "geoip": {
+      "field": "remote_ip"
+    }
+  }]
+}
diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/ingest/no_plugins.json b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/ingest/no_plugins.json
new file mode 100644
index 00000000..1f5422b2
--- /dev/null
+++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/ingest/no_plugins.json
@@ -0,0 +1,37 @@
+{
+  "description": "Pipeline for parsing Nginx logs. Requires no plugins",
+  "processors": [{
+    "grok": {
+      "field": "message",
+      "patterns":[
+        "%{IPORHOST:nginx.access.remote_ip} - %{DATA:nginx.access.user_name} \\[%{HTTPDATE:nginx.access.time}\\] \"%{WORD:nginx.access.method} %{DATA:nginx.access.url} HTTP/%{NUMBER:nginx.access.http_version}\" %{NUMBER:nginx.access.response_code} %{NUMBER:nginx.access.body_sent.bytes} \"%{DATA:nginx.access.referrer}\" \"%{DATA:nginx.access.agent}\""
+      ],
+      "ignore_missing": true
+    }
+  },{
+    "remove":{
+      "field": "message"
+    }
+  }, {
+    "rename": {
+      "field": "@timestamp",
+      "target_field": "read_timestamp"
+    }
+  }, {
+    "date": {
+      "field": "nginx.access.time",
+      "target_field": "@timestamp",
+      "formats": ["dd/MMM/YYYY:H:m:s Z"]
+    }
+  }, {
+    "remove": {
+      "field": "nginx.access.time"
+    }
+  }],
+  "on_failure" : [{
+    "set" : {
+      "field" : "error",
+      "value" : "{{ _ingest.on_failure_message }}"
+    }
+  }]
+}
diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/ingest/with_plugins.json b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/ingest/with_plugins.json
new file mode 100644
index 00000000..cf0441d5
--- /dev/null
+++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/ingest/with_plugins.json
@@ -0,0 +1,51 @@
+{
+  "description": "Pipeline for parsing Nginx access logs. Requires the geoip and user_agent plugins.",
+  "processors": [{
+    "grok": {
+      "field": "message",
+      "patterns":[
+        "%{IPORHOST:nginx.access.remote_ip} - %{DATA:nginx.access.user_name} \\[%{HTTPDATE:nginx.access.time}\\] \"%{WORD:nginx.access.method} %{DATA:nginx.access.url} HTTP/%{NUMBER:nginx.access.http_version}\" %{NUMBER:nginx.access.response_code} %{NUMBER:nginx.access.body_sent.bytes} \"%{DATA:nginx.access.referrer}\" \"%{DATA:nginx.access.agent}\""
+      ],
+      "ignore_missing": true
+    }
+  },{
+    "remove":{
+      "field": "message"
+    }
+  }, {
+    "rename": {
+      "field": "@timestamp",
+      "target_field": "read_timestamp"
+    }
+  }, {
+    "date": {
+      "field": "nginx.access.time",
+      "target_field": "@timestamp",
+      "formats": ["dd/MMM/YYYY:H:m:s Z"]
+    }
+  }, {
+    "remove": {
+      "field": "nginx.access.time"
+    }
+  }, {
+    "user_agent": {
+      "field": "nginx.access.agent",
+      "target_field": "nginx.access.user_agent"
+    }
+  }, {
+    "remove": {
+      "field": "nginx.access.agent"
+    }
+  }, {
+    "geoip": {
+      "field": "nginx.access.remote_ip",
+      "target_field": "nginx.access.geoip"
+    }
+  }],
+  "on_failure" : [{
+    "set" : {
+      "field" : "error",
+      "value" : "{{ _ingest.on_failure_message }}"
+    }
+  }]
+}
diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/manifest.yml
new file mode 100644
index 00000000..4ec44f1c
--- /dev/null
+++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/manifest.yml
@@ -0,0 +1,17 @@
+module_version: 1.0
+
+vars:
+  paths:
+    default:
+      - /var/log/nginx/access.log*
+    os.darwin:
+      - /usr/local/var/log/nginx/access.log*
+    os.windows:
+      - c:/programfiles/nginx/logs/access.log*
+  pipeline:
+    # options: with_plugins, no_plugins, json_with_plugins, json_no_plugins
+    default: with_plugins
+
+ingest_pipeline: ingest/{{pipeline}}.json
+prospectors:
+  - config/nginx-access.yml
diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/access.log b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/access.log
new file mode 100644
index 00000000..58991d26
--- /dev/null
+++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/access.log
@@ -0,0 +1,12 @@
+77.179.66.156 - - [25/Oct/2016:14:49:33 +0200] "GET / HTTP/1.1" 200 612 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.59 Safari/537.36"
+77.179.66.156 - - [25/Oct/2016:14:49:34 +0200] "GET /favicon.ico HTTP/1.1" 404 571 "http://localhost:8080/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.59 Safari/537.36"
+77.179.66.156 - - [25/Oct/2016:14:50:44 +0200] "GET /adsasd HTTP/1.1" 404 571 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.59 Safari/537.36"
+77.179.66.156 - - [07/Dec/2016:10:34:43 +0100] "GET / HTTP/1.1" 200 612 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36"
+77.179.66.156 - - [07/Dec/2016:10:34:43 +0100] "GET /favicon.ico HTTP/1.1" 404 571 "http://localhost:8080/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36"
+77.179.66.156 - - [07/Dec/2016:10:43:18 +0100] "GET /test HTTP/1.1" 404 571 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36"
+77.179.66.156 - - [07/Dec/2016:10:43:21 +0100] "GET /test 
HTTP/1.1" 404 571 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36" +77.179.66.156 - - [07/Dec/2016:10:43:23 +0100] "GET /test1 HTTP/1.1" 404 571 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36" +127.0.0.1 - - [07/Dec/2016:11:04:37 +0100] "GET /test1 HTTP/1.1" 404 571 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36" +127.0.0.1 - - [07/Dec/2016:11:04:58 +0100] "GET / HTTP/1.1" 304 0 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:49.0) Gecko/20100101 Firefox/49.0" +127.0.0.1 - - [07/Dec/2016:11:04:59 +0100] "GET / HTTP/1.1" 304 0 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:49.0) Gecko/20100101 Firefox/49.0" +127.0.0.1 - - [07/Dec/2016:11:05:07 +0100] "GET /taga HTTP/1.1" 404 169 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:49.0) Gecko/20100101 Firefox/49.0" diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/error/_meta/fields.yml b/vendor/github.com/elastic/beats/filebeat/module/nginx/error/_meta/fields.yml new file mode 100644 index 00000000..bc908c10 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/error/_meta/fields.yml @@ -0,0 +1,25 @@ +- name: error + type: group + description: > + Contains fields for the Nginx error logs. + fields: + - name: level + type: keyword + description: > + Error level (e.g. error, critical). + - name: pid + type: long + description: > + Process identifier (PID). + - name: tid + type: long + description: > + Thread identifier. + - name: connection_id + type: long + description: > + Connection identifier. + - name: message + type: text + description: > + The error message diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/error/config/nginx-error.yml b/vendor/github.com/elastic/beats/filebeat/module/nginx/error/config/nginx-error.yml new file mode 100644 index 00000000..49c78b79 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/error/config/nginx-error.yml @@ -0,0 +1,10 @@ +- input_type: log + paths: + {%- for path in paths %} + - {{path}} + {%- endfor %} + exclude_files: [".gz$"] + fields: + source_type: nginx-error + pipeline_id: {{beat.pipeline_id}} + diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/error/ingest/pipeline.json b/vendor/github.com/elastic/beats/filebeat/module/nginx/error/ingest/pipeline.json new file mode 100644 index 00000000..e60aadde --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/error/ingest/pipeline.json @@ -0,0 +1,37 @@ +{ + "description": "Pipeline for parsing the Nginx error logs", + "processors": [{ + "grok": { + "field": "message", + "patterns": [ + "%{DATA:nginx.error.time} \\[%{DATA:nginx.error.level}\\] %{NUMBER:nginx.error.pid}#%{NUMBER:nginx.error.tid}: (\\*%{NUMBER:nginx.error.connection_id} )?%{GREEDYDATA:nginx.error.message}" + ], + "ignore_missing": true + } + },{ + "remove":{ + "field": "message" + } + }, { + "rename": { + "field": "@timestamp", + "target_field": "read_timestamp" + } + }, { + "date": { + "field": "nginx.error.time", + "target_field": "@timestamp", + "formats": ["YYYY/MM/dd H:m:s"] + } + }, { + "remove": { + "field": "nginx.error.time" + } + }], + "on_failure" : [{ + "set" : { + "field" : "error", + "value" : "{{ _ingest.on_failure_message }}" + } + }] +} diff --git 
a/vendor/github.com/elastic/beats/filebeat/module/nginx/error/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/nginx/error/manifest.yml new file mode 100644 index 00000000..33b8c4ca --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/error/manifest.yml @@ -0,0 +1,14 @@ +module_version: 1.0 + +vars: + paths: + default: + - /var/log/nginx/error.log* + os.darwin: + - /usr/local/var/log/nginx/error.log* + os.windows: + - c:/programfiles/nginx/logs/error.log* + +ingest_pipeline: ingest/pipeline.json +prospectors: + - config/nginx-error.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/error/test/error.log b/vendor/github.com/elastic/beats/filebeat/module/nginx/error/test/error.log new file mode 100644 index 00000000..864d98d1 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/error/test/error.log @@ -0,0 +1,2 @@ +2016/10/25 14:49:34 [error] 54053#0: *1 open() "/usr/local/Cellar/nginx/1.10.2_1/html/favicon.ico" failed (2: No such file or directory), client: 127.0.0.1, server: localhost, request: "GET /favicon.ico HTTP/1.1", host: "localhost:8080", referrer: "http://localhost:8080/" +2016/10/25 14:50:44 [error] 54053#0: *3 open() "/usr/local/Cellar/nginx/1.10.2_1/html/adsasd" failed (2: No such file or directory), client: 127.0.0.1, server: localhost, request: "GET /adsasd HTTP/1.1", host: "localhost:8080" diff --git a/vendor/github.com/elastic/beats/filebeat/module/syslog/_meta/fields.yml b/vendor/github.com/elastic/beats/filebeat/module/syslog/_meta/fields.yml new file mode 100644 index 00000000..750ba3b1 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/syslog/_meta/fields.yml @@ -0,0 +1,10 @@ +- key: syslog + title: "Syslog" + description: > + Module for parsing syslog files. + fields: + - name: syslog + type: group + description: > + Fields from the syslog files. 
+ fields: diff --git a/vendor/github.com/elastic/beats/filebeat/module/syslog/_meta/kibana/dashboard/Filebeat-syslog-dashboard.json b/vendor/github.com/elastic/beats/filebeat/module/syslog/_meta/kibana/dashboard/Filebeat-syslog-dashboard.json new file mode 100644 index 00000000..7275bf78 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/syslog/_meta/kibana/dashboard/Filebeat-syslog-dashboard.json @@ -0,0 +1,13 @@ +{ + "hits": 0, + "timeRestore": false, + "description": "", + "title": "Filebeat syslog dashboard", + "uiStateJSON": "{}", + "panelsJSON": "[{\"id\":\"Syslog-events-by-hostname\",\"type\":\"visualization\",\"panelIndex\":1,\"size_x\":8,\"size_y\":4,\"col\":1,\"row\":1},{\"id\":\"Syslog-hostnames-and-processes\",\"type\":\"visualization\",\"panelIndex\":2,\"size_x\":4,\"size_y\":4,\"col\":9,\"row\":1},{\"id\":\"Syslog-system-logs\",\"type\":\"search\",\"panelIndex\":3,\"size_x\":12,\"size_y\":7,\"col\":1,\"row\":5,\"columns\":[\"syslog.system.hostname\",\"syslog.system.program\",\"syslog.system.message\"],\"sort\":[\"@timestamp\",\"desc\"]}]", + "optionsJSON": "{\"darkTheme\":false}", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}}}]}" + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/syslog/_meta/kibana/search/Syslog-system-logs.json b/vendor/github.com/elastic/beats/filebeat/module/syslog/_meta/kibana/search/Syslog-system-logs.json new file mode 100644 index 00000000..e8aa5609 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/syslog/_meta/kibana/search/Syslog-system-logs.json @@ -0,0 +1,18 @@ +{ + "sort": [ + "@timestamp", + "desc" + ], + "hits": 0, + "description": "", + "title": "Syslog system logs", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"index\":\"filebeat-*\",\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647},\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}}}" + }, + "columns": [ + "syslog.system.hostname", + "syslog.system.program", + "syslog.system.message" + ] +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/syslog/_meta/kibana/visualization/Syslog-events-by-hostname.json b/vendor/github.com/elastic/beats/filebeat/module/syslog/_meta/kibana/visualization/Syslog-events-by-hostname.json new file mode 100644 index 00000000..bcdef77d --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/syslog/_meta/kibana/visualization/Syslog-events-by-hostname.json @@ -0,0 +1,11 @@ +{ + "visState": "{\"title\":\"Syslog events by 
hostname\",\"type\":\"histogram\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"scale\":\"linear\",\"mode\":\"stacked\",\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"yAxis\":{}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"syslog.system.hostname\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", + "description": "", + "title": "Syslog events by hostname", + "uiStateJSON": "{}", + "version": 1, + "savedSearchId": "Syslog-system-logs", + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"filter\":[]}" + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/syslog/_meta/kibana/visualization/Syslog-hostnames-and-processes.json b/vendor/github.com/elastic/beats/filebeat/module/syslog/_meta/kibana/visualization/Syslog-hostnames-and-processes.json new file mode 100644 index 00000000..38f3fcae --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/syslog/_meta/kibana/visualization/Syslog-hostnames-and-processes.json @@ -0,0 +1,11 @@ +{ + "visState": "{\"title\":\"Syslog hostnames and processes\",\"type\":\"pie\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"bottom\",\"isDonut\":true},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"syslog.system.hostname\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"syslog.system.program\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", + "description": "", + "title": "Syslog hostnames and processes", + "uiStateJSON": "{}", + "version": 1, + "savedSearchId": "Syslog-system-logs", + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"filter\":[]}" + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/syslog/system/_meta/fields.yml b/vendor/github.com/elastic/beats/filebeat/module/syslog/system/_meta/fields.yml new file mode 100644 index 00000000..92174268 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/syslog/system/_meta/fields.yml @@ -0,0 +1,20 @@ +- name: system + type: group + description: > + Contains fields from the syslog system logs. + fields: + - name: timestamp + description: > + The timestamp as read from the syslog message. + - name: hostname + description: > + The hostname as read from the syslog message. + - name: program + description: > + The process name as read from the syslog message. + - name: pid + description: > + The PID of the process that sent the syslog message. + - name: message + description: > + The message in the log line. 
diff --git a/vendor/github.com/elastic/beats/filebeat/module/syslog/system/config/system.yml b/vendor/github.com/elastic/beats/filebeat/module/syslog/system/config/system.yml new file mode 100644 index 00000000..aac6c308 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/syslog/system/config/system.yml @@ -0,0 +1,12 @@ +- input_type: log + paths: + {%- for path in paths %} + - {{path}} + {%- endfor %} + exclude_files: [".gz$"] + multiline: + pattern: "^\\s" + match: after + fields: + source_type: syslog-system + pipeline_id: {{beat.pipeline_id}} diff --git a/vendor/github.com/elastic/beats/filebeat/module/syslog/system/ingest/pipeline.json b/vendor/github.com/elastic/beats/filebeat/module/syslog/system/ingest/pipeline.json new file mode 100644 index 00000000..aaa086c4 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/syslog/system/ingest/pipeline.json @@ -0,0 +1,39 @@ +{ + "description": "Pipeline for parsing Syslog messages.", + "processors": [ + { + "grok": { + "field": "message", + "patterns": [ + "%{SYSLOGTIMESTAMP:syslog.system.timestamp} %{SYSLOGHOST:syslog.system.hostname} %{DATA:syslog.system.program}(?:\\[%{POSINT:syslog.system.pid}\\])?: %{GREEDYMULTILINE:syslog.system.message}" + ], + "pattern_definitions" : { + "GREEDYMULTILINE" : "(.|\n)*" + }, + "ignore_missing": true + } + }, + { + "remove": { + "field": "message" + } + }, + { + "date": { + "field": "syslog.system.timestamp", + "target_field": "@timestamp", + "formats": [ + "MMM d HH:mm:ss", + "MMM dd HH:mm:ss" + ], + "ignore_failure": true + } + } + ], + "on_failure" : [{ + "set" : { + "field" : "error", + "value" : "{{ _ingest.on_failure_message }}" + } + }] +} diff --git a/vendor/github.com/elastic/beats/filebeat/module/syslog/system/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/syslog/system/manifest.yml new file mode 100644 index 00000000..63d77260 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/syslog/system/manifest.yml @@ -0,0 +1,14 @@ +module_version: 1.0 + +vars: + paths: + default: + - /var/log/messages* + - /var/log/syslog* + os.darwin: + - /var/log/system.log* + os.windows: [] + +ingest_pipeline: ingest/pipeline.json +prospectors: + - config/system.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/syslog/system/test/darwin-syslog-sample.log b/vendor/github.com/elastic/beats/filebeat/module/syslog/system/test/darwin-syslog-sample.log new file mode 100644 index 00000000..55e44c52 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/syslog/system/test/darwin-syslog-sample.log @@ -0,0 +1,20 @@ +Dec 13 11:35:28 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:28.420 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSAgentApp updateProductWithProductID:usingEngine:] Checking for updates for "All Products" using engine + >> + processor= + isProcessing=NO actionsCompleted=0 progress=0.00 + errors=0 currentActionErrors=0 + events=0 currentActionEvents=0 + actionQueue=( ) + > + delegate=(null) + serverInfoStore=(null) + errors=0 + > +Dec 13 11:35:28 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:28.421 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSUpdateEngine updateAllExceptProduct:] KSUpdateEngine updating all installed products, except:'com.google.Keystone'. 
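The expected-output JSON in the next file pairs the entries of darwin-syslog-sample.log with the events the syslog/system ingest pipeline produces for them. As a rough illustration only (not part of this patch), the same pipeline can be exercised against a single sample line through Elasticsearch's _ingest/pipeline/_simulate API; the Python sketch below assumes an Elasticsearch 5.x node listening on localhost:9200 and copies the pipeline body from ingest/pipeline.json above, with the on_failure handler omitted.

import json
import urllib.request

# Pipeline body taken from module/syslog/system/ingest/pipeline.json (on_failure omitted).
PIPELINE = {
    "description": "Pipeline for parsing Syslog messages.",
    "processors": [
        {
            "grok": {
                "field": "message",
                "patterns": [
                    "%{SYSLOGTIMESTAMP:syslog.system.timestamp} %{SYSLOGHOST:syslog.system.hostname} %{DATA:syslog.system.program}(?:\\[%{POSINT:syslog.system.pid}\\])?: %{GREEDYMULTILINE:syslog.system.message}"
                ],
                "pattern_definitions": {"GREEDYMULTILINE": "(.|\n)*"},
                "ignore_missing": True,
            }
        },
        {"remove": {"field": "message"}},
        {
            "date": {
                "field": "syslog.system.timestamp",
                "target_field": "@timestamp",
                "formats": ["MMM d HH:mm:ss", "MMM dd HH:mm:ss"],
                "ignore_failure": True,
            }
        },
    ],
}

# One entry from module/syslog/system/test/darwin-syslog-sample.log.
SAMPLE = (
    "Dec 13 11:35:28 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: "
    "2016-12-13 11:35:28.421 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] "
    "-[KSUpdateEngine updateAllExceptProduct:] KSUpdateEngine updating all installed "
    "products, except:'com.google.Keystone'."
)

# Build the simulate request: the pipeline definition plus one document to run through it.
body = json.dumps({
    "pipeline": PIPELINE,
    "docs": [{"_source": {"message": SAMPLE}}],
}).encode("utf-8")

# POST to the simulate endpoint and pretty-print the resulting event.
req = urllib.request.Request(
    "http://localhost:9200/_ingest/pipeline/_simulate",
    data=body,
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.dumps(json.loads(resp.read()), indent=2))

The simulated _source should contain the syslog.system.* fields seen in the expected file (hostname, pid, program, message, timestamp); the beat, offset, and fields metadata in that file are added by Filebeat itself rather than by the pipeline.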
diff --git a/vendor/github.com/elastic/beats/filebeat/module/syslog/system/test/darwin-syslog-sample.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/syslog/system/test/darwin-syslog-sample.log-expected.json new file mode 100644 index 00000000..1a41743c --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/syslog/system/test/darwin-syslog-sample.log-expected.json @@ -0,0 +1,80 @@ +[ +{ + "_index": "filebeat-2016.12.15", + "_type": "log", + "_id": "AVkBzqQ6j7PoDSoX1nl9", + "_score": null, + "_source": { + "@timestamp": "2016-12-13T11:35:28.000Z", + "offset": 907, + "beat": { + "hostname": "a-mac-with-esc-key.local", + "name": "a-mac-with-esc-key.local", + "version": "6.0.0-alpha1" + }, + "input_type": "log", + "source": "module/syslog/system/test/darwin-syslog-sample.log", + "syslog": { + "system": { + "hostname": "a-mac-with-esc-key", + "pid": "21412", + "program": "GoogleSoftwareUpdateAgent", + "message": "2016-12-13 11:35:28.420 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSAgentApp updateProductWithProductID:usingEngine:] Checking for updates for \"All Products\" using engine \n\t\t>>\n\t\tprocessor=\n\t\t\tisProcessing=NO actionsCompleted=0 progress=0.00\n\t\t\terrors=0 currentActionErrors=0\n\t\t\tevents=0 currentActionEvents=0\n\t\t\tactionQueue=( )\n\t\t>\n\t\tdelegate=(null)\n\t\tserverInfoStore=(null)\n\t\terrors=0\n\t>", + "timestamp": "Dec 13 11:35:28" + } + }, + "fields": { + "pipeline_id": "syslog-system-pipeline", + "source_type": "syslog-system" + }, + "type": "log" + }, + "fields": { + "@timestamp": [ + 1481628928000 + ] + }, + "sort": [ + 1481628928000 + ] +}, +{ + "_index": "filebeat-2016.12.15", + "_type": "log", + "_id": "AVkBzrcuj7PoDSoX1nl-", + "_score": null, + "_source": { + "@timestamp": "2016-12-13T11:35:28.000Z", + "offset": 1176, + "beat": { + "hostname": "a-mac-with-esc-key.local", + "name": "a-mac-with-esc-key.local", + "version": "6.0.0-alpha1" + }, + "input_type": "log", + "source": "module/syslog/system/test/darwin-syslog-sample.log", + "syslog": { + "system": { + "hostname": "a-mac-with-esc-key", + "pid": "21412", + "program": "GoogleSoftwareUpdateAgent", + "message": "2016-12-13 11:35:28.421 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSUpdateEngine updateAllExceptProduct:] KSUpdateEngine updating all installed products, except:'com.google.Keystone'.", + "timestamp": "Dec 13 11:35:28" + } + }, + "fields": { + "pipeline_id": "syslog-system-pipeline", + "source_type": "syslog-system" + }, + "type": "log" + }, + "fields": { + "@timestamp": [ + 1481628928000 + ] + }, + "sort": [ + 1481628928000 + ] +} +] diff --git a/vendor/github.com/elastic/beats/filebeat/module/syslog/system/test/darwin-syslog.log b/vendor/github.com/elastic/beats/filebeat/module/syslog/system/test/darwin-syslog.log new file mode 100644 index 00000000..f0329c33 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/syslog/system/test/darwin-syslog.log @@ -0,0 +1,497 @@ +Dec 13 11:35:28 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:28.419 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSAgentApp performSelfUpdateWithEngine:] Finished self update check. 
+Dec 13 11:35:28 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:28.420 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSAgentApp updateProductWithProductID:usingEngine:] Checking for updates for "All Products" using engine + >> + processor= + isProcessing=NO actionsCompleted=0 progress=0.00 + errors=0 currentActionErrors=0 + events=0 currentActionEvents=0 + actionQueue=( ) + > + delegate=(null) + serverInfoStore=(null) + errors=0 + > +Dec 13 11:35:28 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:28.421 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSUpdateEngine updateAllExceptProduct:] KSUpdateEngine updating all installed products, except:'com.google.Keystone'. +Dec 13 11:35:28 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:28.422 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSCheckAction performAction] KSCheckAction checking 2 ticket(s). +Dec 13 11:35:28 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:28.428 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSUpdateCheckAction performAction] KSUpdateCheckAction starting update check for ticket(s): {( + + serverType=Omaha + url=https://tools.google.com/service/update2 + creationDate=2015-06-25 15:40:23 + tagPath=/Applications/Google Chrome.app/Contents/Info.plist + tagKey=KSChannelID + brandPath=/Users/tsg/Library/Google/Google Chrome Brand.plist + brandKey=KSBrandID + versionPath=/Applications/Google Chrome.app/Contents/Info.plist + versionKey=KSVersion + cohort=1:1y5:gy3@0.05 + cohortName=Stable + ticketVersion=1 + >, + + serverType=Omaha + url=https://tools.google.com/service/update2 + creationDate=2015-09-11 20:38:12 + ticketVersion=1 + > + )} + Using server: + > +Dec 13 11:35:28 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:28.446 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] +[KSCodeSigningVerification verifyBundle:applicationId:error:] KSCodeSigningVerification verifying code signing for '/Applications/Google Chrome.app' with the requirement 'anchor apple generic and certificate 1[field.1.2.840.113635.100.6.2.6] exists and certificate leaf[field.1.2.840.113635.100.6.1.13] exists and certificate leaf[subject.OU]="EQHXZ8M8AV" and (identifier="com.google.Chrome")' +Dec 13 11:35:29 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:29.430 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] +[KSCodeSigningVerification verifyBundle:applicationId:error:] KSCodeSigningVerification verifying code signing for '/Applications/Google Drive.app' with the requirement 'anchor apple generic and certificate 1[field.1.2.840.113635.100.6.2.6] exists and certificate leaf[field.1.2.840.113635.100.6.1.13] exists and certificate leaf[subject.OU]="EQHXZ8M8AV" and (identifier="com.google.GoogleDrive")' +Dec 13 11:35:30 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:30.115 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSUpdateCheckAction performAction] KSUpdateCheckAction running KSServerUpdateRequest: + url="https://tools.google.com/service/update2?cup2hreq=423332d883f010d5b10e169646ed851278047f76e6c5d4dbfa2233ef66e3b141&cup2key=6:1566315822" + fallbackURLs=( + http://tools.google.com/service/update2?cup2hreq=423332d883f010d5b10e169646ed851278047f76e6c5d4dbfa2233ef66e3b141&cup2key=6:1617080069 + ) + runningFetchers=0 + tickets=2 + body= + + + + + + + + + + + + + headers={ + "X-GoogleUpdate-Interactivity" = bg; + } + > +Dec 13 
11:35:30 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:30.116 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher beginFetchWithDelegate:] KSOutOfProcessFetcher start fetch from URL: "https://tools.google.com/service/update2?cup2hreq=423332d883f010d5b10e169646ed851278047f76e6c5d4dbfa2233ef66e3b141&cup2key=6:1566315822" +Dec 13 11:35:30 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:30.117 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher(PrivateMethods) launchedHelperTaskForToolPath:error:] KSOutOfProcessFetcher launched '/Users/tsg/Library/Google/GoogleSoftwareUpdate/GoogleSoftwareUpdate.bundle/Contents/MacOS/ksfetch' with process id: 21414 +Dec 13 11:35:30 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:30.118 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher beginFetchWithDelegate:] KSOutOfProcessFetcher sending both request and download file location to the helper. +Dec 13 11:35:30 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:30.118 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] KSSendAllDataToHelper() KSHelperTool wrote 2383 bytes to the helper input. +Dec 13 11:35:30 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:30.118 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher beginFetchWithDelegate:] Closing the file handle. +Dec 13 11:35:30 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:30.118 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher beginFetchWithDelegate:] KSOutOfProcessFetcher fetching from URL: "https://tools.google.com/service/update2?cup2hreq=423332d883f010d5b10e169646ed851278047f76e6c5d4dbfa2233ef66e3b141&cup2key=6:1566315822" +Dec 13 11:35:30 a-mac-with-esc-key ksfetch[21414]: 2016-12-13 11:35:30.149 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] KSHelperReceiveAllData() KSHelperTool read 2383 bytes from stdin. +Dec 13 11:35:30 a-mac-with-esc-key ksfetch[21414]: 2016-12-13 11:35:30.151 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] main() Fetcher received a request: { URL: https://tools.google.com/service/update2?cup2hreq=423332d883f010d5b10e169646ed851278047f76e6c5d4dbfa2233ef66e3b141&cup2key=6:1566315822 } +Dec 13 11:35:30 a-mac-with-esc-key ksfetch[21414]: 2016-12-13 11:35:30.151 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] main() Fetcher received a download path: /tmp/KSOutOfProcessFetcher.QTqOLkktQz/download +Dec 13 11:35:30 a-mac-with-esc-key ksfetch[21414]: 2016-12-13 11:35:30.152 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] main() ksfetch fetching URL ( { URL: https://tools.google.com/service/update2?cup2hreq=423332d883f010d5b10e169646ed851278047f76e6c5d4dbfa2233ef66e3b141&cup2key=6:1566315822 }) to folder:/tmp/KSOutOfProcessFetcher.QTqOLkktQz/download +Dec 13 11:35:30 a-mac-with-esc-key ksfetch[21414]: 2016-12-13 11:35:30.152 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] main() Setting up download file handles... +Dec 13 11:35:30 a-mac-with-esc-key ksfetch[21414]: 2016-12-13 11:35:30.348 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] -[FetchDelegate fetcher:finishedWithData:] Fetcher downloaded successfully data of length: 0 +Dec 13 11:35:30 a-mac-with-esc-key ksfetch[21414]: 2016-12-13 11:35:30.348 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] main() ksfetch done fetching. +Dec 13 11:35:30 a-mac-with-esc-key ksfetch[21414]: 2016-12-13 11:35:30.351 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] main() Fetcher is exiting. 
+Dec 13 11:35:30 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:30.354 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher(PrivateMethods) helperErrorAvailable:] KSOutOfProcessFetcher helper tool raw STDERR: + : <> +Dec 13 11:35:30 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:30.354 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher(PrivateMethods) helperDidTerminate:] KSOutOfProcessFetcher fetch ended for URL: "https://tools.google.com/service/update2?cup2hreq=423332d883f010d5b10e169646ed851278047f76e6c5d4dbfa2233ef66e3b141&cup2key=6:1566315822" +Dec 13 11:35:30 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:30.355 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSUpdateCheckAction(KSServerUpdateRequestDelegate) serverRequest:fetchedWithResponse:] KSUpdateCheckAction received KSServerUpdateResponse: + url="https://tools.google.com/service/update2?cup2hreq=423332d883f010d5b10e169646ed851278047f76e6c5d4dbfa2233ef66e3b141&cup2key=6:1566315822" + tickets=2 + status=200 + data= + + + + + + + + + + + + + > +Dec 13 11:35:30 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:30.356 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOmahaServer updateInfosForUpdateResponse:updateRequest:infoStore:upToDateTickets:updatedTickets:events:errors:] Response passed CUP validation. +Dec 13 11:35:30 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:30.381 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSUpdateCheckAction(PrivateMethods) finishAction] KSUpdateCheckAction found updates: {( )} +Dec 13 11:35:30 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:30.384 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSPrefetchAction performAction] KSPrefetchAction no updates to prefetch. +Dec 13 11:35:30 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:30.384 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSMultiUpdateAction performAction] KSSilentUpdateAction had no updates to apply. +Dec 13 11:35:30 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:30.384 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSMultiUpdateAction performAction] KSPromptAction had no updates to apply. +Dec 13 11:35:30 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:30.384 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSAgentApp(KeystoneDelegate) updateEngineFinishedWithErrors:] Keystone finished: errors=0 +Dec 13 11:35:30 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:30.385 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSUpdateEngine(PrivateMethods) updateFinish] KSUpdateEngine update processing complete. 
+Dec 13 11:35:31 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:31.142 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSAgentApp updateProductWithProductID:usingEngine:] Done checking for updates for '"All Products"' using engine + >> + processor= + isProcessing=NO actionsCompleted=0 progress=0.00 + errors=0 currentActionErrors=0 + events=0 currentActionEvents=0 + actionQueue=( ) + > + delegate= + serverInfoStore= + errors=0 + > +Dec 13 11:35:31 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:31.302 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSAgentUploader fetcher:finishedWithData:] Successfully uploaded stats to { URL: https://tools.google.com/service/update2 } +Dec 13 11:35:31 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:31.431 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSAgentApp uploadStats:] Successfully uploaded stats +Dec 13 11:35:32 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:32.508 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSAgentApp(KeystoneThread) runKeystonesInThreadWithArg:] Finished with engine thread +Dec 13 11:35:32 a-mac-with-esc-key GoogleSoftwareUpdateAgent[21412]: 2016-12-13 11:35:32.825 GoogleSoftwareUpdateAgent[21412/0x7fffcc3f93c0] [lvl=2] -[KSAgentApp checkForUpdates] Finished update check. +Dec 13 11:35:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60000a8499d0 holds 0x2121212121212121 instead of 0x600006a22fa0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 11:37:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60800f047240 holds 0x2121212121212121 instead of 0x608002231220. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 11:38:45 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[21498]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 11:39:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60000a256990 holds 0x2121212121212121 instead of 0x600006a22420. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 11:41:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x6080096475d0 holds 0x2121212121212121 instead of 0x608004e21280. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 11:41:57 a-mac-with-esc-key syslogd[46]: ASL Sender Statistics +Dec 13 11:42:55 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[21556]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 11:45:18 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.imfoundation.IMRemoteURLConnectionAgent): Unknown key for integer: _DirtyJetsamMemoryLimit +Dec 13 11:45:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60000a85a860 holds 0x2121212121212121 instead of 0x600004a3b9a0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 11:47:06 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[21581]): Endpoint has been activated through legacy launch(3) APIs. 
Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 11:47:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x608009840580 holds 0x2121212121212121 instead of 0x608004a22940. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 11:49:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x608009c5b700 holds 0x2121212121212121 instead of 0x608005830020. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 11:51:17 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[21586]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 11:51:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60800ee592d0 holds 0x2121212121212121 instead of 0x608005627220. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 11:51:57 a-mac-with-esc-key syslogd[46]: ASL Sender Statistics +Dec 13 11:53:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60000c648290 holds 0x2121212121212121 instead of 0x6000050242a0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 11:55:28 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[21589]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 11:55:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x600009840460 holds 0x2121212121212121 instead of 0x60000122e940. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 11:56:30 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.imfoundation.IMRemoteURLConnectionAgent): Unknown key for integer: _DirtyJetsamMemoryLimit +Dec 13 11:57:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60000ee5b730 holds 0x2121212121212121 instead of 0x600007821c20. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 11:59:40 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[21946]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 12:01:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x600006a49940 holds 0x2121212121212121 instead of 0x6000078202e0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:01:57 a-mac-with-esc-key syslogd[46]: ASL Sender Statistics +Dec 13 12:03:04 a-mac-with-esc-key Slack Helper[55199]: Invoked notification with id: d63743fb-f17b-4e9e-97d0-88e0e7304682 +Dec 13 12:03:51 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[21966]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 12:05:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60800f043dc0 holds 0x2121212121212121 instead of 0x6080026228c0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. 
+Dec 13 12:08:02 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[21981]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 12:09:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x608009a53600 holds 0x2121212121212121 instead of 0x608000629420. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:11:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60800f259c30 holds 0x2121212121212121 instead of 0x608004a21c20. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:11:57 a-mac-with-esc-key syslogd[46]: ASL Sender Statistics +Dec 13 12:12:13 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[22226]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 12:13:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60000c647d80 holds 0x2121212121212121 instead of 0x600006e3ee80. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:15:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60800f053a80 holds 0x2121212121212121 instead of 0x608007227ce0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:16:24 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[22241]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 12:17:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60000a64ce80 holds 0x2121212121212121 instead of 0x600006629940. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:19:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60000a843580 holds 0x2121212121212121 instead of 0x600006629540. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:20:35 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[22254]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 12:21:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60800f45b910 holds 0x2121212121212121 instead of 0x608005822c40. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:21:57 a-mac-with-esc-key syslogd[46]: ASL Sender Statistics +Dec 13 12:23:13 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.imfoundation.IMRemoteURLConnectionAgent): Unknown key for integer: _DirtyJetsamMemoryLimit +Dec 13 12:23:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60000ea5edf0 holds 0x2121212121212121 instead of 0x600003a35a60. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:24:46 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[22265]): Endpoint has been activated through legacy launch(3) APIs. 
Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 12:28:43 a-mac-with-esc-key Slack Helper[55199]: Invoked notification with id: 52bf37d9-0c4e-4276-8789-9fc7704bdf5b +Dec 13 12:28:57 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[22292]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 12:29:06 a-mac-with-esc-key Slack Helper[55199]: Invoked notification with id: c6c7e356-60a7-4b9e-a9b1-ecc2b8ad09f2 +Dec 13 12:29:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60800f246430 holds 0x2121212121212121 instead of 0x608001c26d00. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:31:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60800c85fd80 holds 0x2121212121212121 instead of 0x608005a3a420. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:31:57 a-mac-with-esc-key syslogd[46]: ASL Sender Statistics +Dec 13 12:33:08 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[22305]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 12:33:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x600006452400 holds 0x2121212121212121 instead of 0x60000763bac0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:35:56 a-mac-with-esc-key GoogleSoftwareUpdateAgent[22318]: 2016-12-13 12:35:56.416 GoogleSoftwareUpdateAgent[22318/0x7fffcc3f93c0] [lvl=2] -[KSAgentApp setupLoggerOutput] Agent settings: +Dec 13 12:37:20 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[22324]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 12:37:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60800f24d0f0 holds 0x2121212121212121 instead of 0x608007423ee0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:39:28 a-mac-with-esc-key Slack Helper[55199]: Invoked notification with id: aa608788-d049-4d1a-9112-521c71702371 +Dec 13 12:41:06 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.imfoundation.IMRemoteURLConnectionAgent): Unknown key for integer: _DirtyJetsamMemoryLimit +Dec 13 12:41:26 a-mac-with-esc-key Slack Helper[55199]: Invoked notification with id: d75f9ec1-a8fd-41c2-a45e-6df2952f0702 +Dec 13 12:41:30 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[22336]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 12:41:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60800a2535a0 holds 0x2121212121212121 instead of 0x608003828e20. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:41:57 a-mac-with-esc-key syslogd[46]: ASL Sender Statistics +Dec 13 12:43:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60800f241d50 holds 0x2121212121212121 instead of 0x60800562f380. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. 
+Dec 13 12:45:41 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[22348]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 12:45:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60000c444450 holds 0x2121212121212121 instead of 0x600007237f00. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:47:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60000c4424a0 holds 0x2121212121212121 instead of 0x600007026520. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:49:13 a-mac-with-esc-key logd[63]: _handle_cache_delete_with_urgency(0x7fc55c429b40, 0, 1) +Dec 13 12:49:13 a-mac-with-esc-key logd[63]: _volume_contains_cached_data(is /private/var/db/diagnostics/ in /) - YES +Dec 13 12:49:13 a-mac-with-esc-key logd[63]: Purged 0 bytes from log files. +Dec 13 12:49:13 a-mac-with-esc-key logd[63]: _purge_uuidtext enter - 1 +Dec 13 12:49:14 a-mac-with-esc-key logd[63]: _purge_uuidtext got 1023 UUIDs and 3 slibs from inflight logs +Dec 13 12:49:14 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.imfoundation.IMRemoteURLConnectionAgent): Unknown key for integer: _DirtyJetsamMemoryLimit +Dec 13 12:49:24 a-mac-with-esc-key logd[63]: _purge_uuidtext got 1303 UUIDs and 3 slibs from inflight and persistent logs +Dec 13 12:49:24 a-mac-with-esc-key logd[63]: _purge_uuidtext processing shared lib uuid 00000000-0000-0000-0000-000000000000 +Dec 13 12:49:24 a-mac-with-esc-key logd[63]: _purge_uuidtext processing shared lib uuid 519BE6A1-940A-3142-975F-4EF4F41A89B3 +Dec 13 12:49:24 a-mac-with-esc-key logd[63]: _purge_uuidtext processing shared lib uuid C43133F6-64A3-3F65-997F-0E985A66E971 +Dec 13 12:49:24 a-mac-with-esc-key logd[63]: _purge_uuidtext got 2260 UUIDs and 3 slibs from inflight and persistent logs and slibs +Dec 13 12:49:24 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 00000000-0000-0000-0000-000000000000 mentioned but not found +Dec 13 12:49:27 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 1BD0C00C-0885-4C02-B522-D1E9CBDE84E7 mentioned but not found +Dec 13 12:49:29 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 40E9BF5F-FF7F-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 60E9BF5F-FF7F-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 66A56E12-C69B-4249-BC49-760C03F3700A mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 700F0308-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 700F190B-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 700F3C07-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 700F6107-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 700F800A-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 700F8102-0070-0000-FD68-88C3FF7F0000 
mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 700F9401-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 700FD70E-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 700FD900-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 700FEE0B-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 700FF904-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 701F1C0F-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 701F2F0E-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 701F4C02-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 701FAE07-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 701FBD0F-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 701FE80B-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 701FEF07-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 701FF700-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 701FF90D-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 702F5E0E-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 702F6503-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 702F6B06-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 702FEB0B-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 702FFC01-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 703F0E06-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 703F4A0A-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 703F8C07-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 703F9405-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 703FA300-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 703FC709-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 
13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 703FD007-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 703FED05-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 704F0003-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 704F550C-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 704F750A-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 704F8102-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 704F8C0C-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 704F8D09-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 704FB402-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 704FBB01-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 705F030E-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 705F2D10-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 705F3B01-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 705F4E0A-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 705FA30D-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 705FDA05-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 705FDF03-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 706F5101-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 706F6300-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 706F6E05-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 706FE207-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 706FEC00-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 706FFB07-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 707F0907-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 707F6A04-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 
a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 707F7B00-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 707F9B0D-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 707FAD09-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 707FB80A-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 707FD809-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 707FE404-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 708F3207-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 708F3402-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 708F3809-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 708F470F-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 708F8A00-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 708F9F0A-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 708FB403-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 708FC507-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 708FDC07-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 708FEA0A-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 708FFC08-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 709F1005-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 709F1E0D-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 709F4C0A-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 709F5F08-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 709F6306-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 709F6903-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 709F980E-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 709FA80C-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: 
_purge_uuidtext uuidtext file for 709FE302-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 709FE808-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 709FE809-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 709FED00-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 709FEF02-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 709FEF0A-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70AF070C-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70AF2108-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70AF270C-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70AF390B-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70AF4A0D-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70AF6D06-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70AF700E-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70AF810D-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70AF9D02-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70AFA200-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70AFBE07-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70AFCC02-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70BF210E-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70BF4C0A-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70BF9000-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70BF9302-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70BFC302-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70BFD507-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70BFD605-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file 
for 70BFE302-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70BFFF03-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70CF0210-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70CF0603-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70CF0802-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70CF180F-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70CF1902-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70CF4A07-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70CF530D-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70CF590D-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70CF770D-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70CFA700-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70CFC804-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70CFE00C-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70CFEA09-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70CFED0B-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70DF4B07-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70DF7301-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70DFA303-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70DFCB0E-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70DFDD01-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70DFE504-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70E9BF5F-FF7F-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70EF2F0A-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70EF4609-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 
70EF5D05-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70EF7F07-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70EF8606-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70EFA406-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70EFA60F-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70EFC606-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70EFD407-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70FF0207-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70FF1E04-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70FF6F01-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70FF7703-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:31 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 70FF9708-0070-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:32 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for 80E8BF5F-FF7F-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:32 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for A0E8BF5F-FF7F-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:32 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for A0E9BF5F-FF7F-0000-FD68-88C3FF7F0000 mentioned but not found +Dec 13 12:49:32 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for B22412E8-3691-4FA9-95EA-C5B9E2A3C572 mentioned but not found +Dec 13 12:49:33 a-mac-with-esc-key logd[63]: _purge_uuidtext uuidtext file for F011D7E8-7633-3668-9455-53893C4F4B33 mentioned but not found +Dec 13 12:49:33 a-mac-with-esc-key logd[63]: _purge_uuidtext tree walked +Dec 13 12:49:33 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/00/0E757A4E2C3108A74D6C5A996AAAAB +Dec 13 12:49:33 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/00/F2131643943190B32FE35236EA4864 +Dec 13 12:49:33 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/06/608E438FDA3E28B9A262F575FE0E75 +Dec 13 12:49:33 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/09/35918C5C783B8AB2E6B75B12056F3C +Dec 13 12:49:33 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/11/DD409E112F373398E6DA86BF046EC9 +Dec 13 12:49:33 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/14/65FB07456D36EC9EC80462D86BB21B +Dec 13 12:49:33 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/18/A779EC17953910996D134A28F5C564 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/19/57E846B04C32FBAD78821B285B0D18 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/1E/79F11C7D5333F1BD0630540535F725 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: 
_purge_uuidtext unlinking /var/db/uuidtext/1E/9811DDA51A3BE9A4A748AD394DBE73 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/23/099C5F0A853312A9BD5694C15D228C +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/27/FBA267162735F8B5A6BF29E3A7670E +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/39/2980D3CAF73E2A94ED57F74979F1D9 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/3E/67870101A7359F88CCB9BD6681FC93 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/41/C51F4A33E03ACF86603802C9E6FFDE +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/42/BF3535B92C3272BA41F8A9BC267F3B +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/42/C18E8D6CEE37FF8DCD1390244CF38E +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/53/4B25B3C583361EADD5CB938678868C +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/54/090A60831C3233A4F0022DB86FF8B8 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/56/8EBEC4BC8230848898534D17830BB6 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/57/58C9F966E631669B74E6625D40C806 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/5E/F7315AF27B31A6A38D6364704D4FFC +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/5F/2B940389D136F2817A41C784D530CB +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/62/196B2A409236898AAD3A1520C53191 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/65/2D3DB29CBA32E297A65465CBA36B01 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/67/58A21E3D2B3620952A68EC384CC1AF +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/69/ADA53CBD3A3E31B08CFD85B12D52E1 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/72/FB1BBBCA3E30E89802A68B8B2B07F1 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/74/702F7027E834ACB0057983649FFB29 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/75/B25BA663DB34EC9AAC6971BBE817EB +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/75/B88148A6E233F8AFF323294DE561E0 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/76/2702DC49823F9E8292BB022D6BAF84 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/76/73D347C0F834879F9438D542975A23 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/78/397DF6C0253FD383E4AFAE3DD2E49C +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/7F/BCC184181A3913ADC50E38F950D098 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/81/12B328744938E1ACF2846B35CD83B4 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/82/3CB803D77334D0B5C759685022D876 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/8A/860FB569623B81B0511956EC82CEA3 +Dec 13 12:49:34 
a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/90/9D581D35E7358AA75371D3A038142D +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/99/AC7E971E8C3319AD0514626D763823 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/9A/53817F2101396598311DB81D851FBA +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/9B/2EB7A3E93A3641B38EAD32B1CBE412 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/9F/E64976D7223E7F992BB3287AF23301 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/A7/8C02A56C0F3A9D90CAD8C92842B9A9 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/A9/733CC25E7239F98BC0812C5D7AF135 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/AB/450D449D5432C9B30A439A35B29931 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/B0/AF101031AA3188A08CF1517F800B2C +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/B4/77C958888B3AB092FD097D2C9A1B13 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/B4/BDFB4CAE49386B963E2C7A296B7D20 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/B5/0CBF2789673C6AB67F80F199CFD499 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/B6/41F64AD9923AD19AED8A35325FB04E +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/B6/566C8F2EA7349EB2C02647D2F69F97 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/BA/2A57BB4346303EA1E87862E6752057 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/C0/2D31E981553F31B0E9C36C232EE607 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/C0/E060E4E9373D4D9B4A930D3291F052 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/C2/531C46380A3DA489F7752C2FE6AEA0 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/C9/17C064F3903260A7DC304FABDDC3FD +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/CD/E2995BDA593F96B16EF1AE92AF31D8 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/CE/EE9ADE6F813CD78A1308F14010F463 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/D1/7E3015AC923AFE89BAFE6411B96431 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/D3/AE090906EC3F058A04EE77A574C8B3 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/DA/BAD1584258317A8483FE9CF10547BD +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/DD/CCB6FD639830F39A5D87247D54F616 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/E1/05E61475463784975FC5278723D08C +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/E1/B515E0321E3B85B90F01D623DC9047 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/E2/8DBEF43A0A37008A26AE9F016435F3 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/E3/55D24FAC0838679583537F319C7B72 +Dec 
13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/EF/8522BAF9393808A2E6018507233133 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext unlinking /var/db/uuidtext/FC/F7262CC2703E32BD3808B2D50C74F0 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext total: 2209, in_use:2104, marked:23, recent:13, deleted 69 +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext slib tree cleaned up (0) +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext uuid tree cleaned up (3) +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: _purge_uuidtext cleaned up (0) +Dec 13 12:49:34 a-mac-with-esc-key logd[63]: Purged 5816519 bytes from uuidtext. +Dec 13 12:49:52 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[22360]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 12:49:57 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x600011443d90 holds 0x2121212121212121 instead of 0x600006e206c0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:51:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60800ac568a0 holds 0x2121212121212121 instead of 0x608003630680. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:51:58 a-mac-with-esc-key syslogd[46]: ASL Sender Statistics +Dec 13 12:53:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60000664ad50 holds 0x2121212121212121 instead of 0x600006c31140. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:54:03 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[22370]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 12:55:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x6000060446c0 holds 0x2121212121212121 instead of 0x600006c34d60. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:57:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60000c645c20 holds 0x2121212121212121 instead of 0x600002e295c0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 12:58:14 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[22382]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 12:59:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60800fe59330 holds 0x2121212121212121 instead of 0x608004030e80. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 13:01:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60000ec41a20 holds 0x2121212121212121 instead of 0x600002e2d920. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. 
+Dec 13 13:01:58 a-mac-with-esc-key syslogd[46]: ASL Sender Statistics +Dec 13 13:03:19 a-mac-with-esc-key Preview[24046]: BUG in libdispatch client: kevent[EVFILT_MACHPORT] monitored resource vanished before the source cancel handler was invoked +Dec 13 13:03:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x608007645da0 holds 0x2121212121212121 instead of 0x6080044252a0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 13:05:26 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[25276]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 13:05:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60000c643b20 holds 0x2121212121212121 instead of 0x6000036340a0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 13:07:26 a-mac-with-esc-key Slack Helper[55199]: Invoked notification with id: 7cc1869b-ba48-4307-8474-0bc68cd9c71d +Dec 13 13:07:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x600007852ee0 holds 0x2121212121212121 instead of 0x600006a22780. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 13:09:37 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[25878]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 13:09:49 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.imfoundation.IMRemoteURLConnectionAgent): Unknown key for integer: _DirtyJetsamMemoryLimit +Dec 13 13:13:48 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[25888]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 13:13:48 a-mac-with-esc-key syslogd[46]: ASL Sender Statistics +Dec 13 13:13:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60001125b6a0 holds 0x2121212121212121 instead of 0x600007234ce0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 13:15:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x600006a41480 holds 0x2121212121212121 instead of 0x600003a2e920. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 13:17:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x600005a46cd0 holds 0x2121212121212121 instead of 0x60000582bd00. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 13:17:59 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[25896]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 13:19:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60800ee5b730 holds 0x2121212121212121 instead of 0x6080072264c0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 13:21:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60800f65cb10 holds 0x2121212121212121 instead of 0x6080046351c0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). 
Break on objc_weak_error to debug. +Dec 13 13:22:10 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[25914]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 13:23:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x600008c56780 holds 0x2121212121212121 instead of 0x600006624600. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 13:23:58 a-mac-with-esc-key syslogd[46]: ASL Sender Statistics +Dec 13 13:25:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60800f65d7a0 holds 0x2121212121212121 instead of 0x608003a3d9a0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 13:26:21 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[25923]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 13:27:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60000785e8e0 holds 0x2121212121212121 instead of 0x600006622ba0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 13:29:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60801005a980 holds 0x2121212121212121 instead of 0x608001a3f8a0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 13:30:33 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[25940]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 13:31:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60000d6588b0 holds 0x2121212121212121 instead of 0x600002a3dd60. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 13:32:28 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.imfoundation.IMRemoteURLConnectionAgent): Unknown key for integer: _DirtyJetsamMemoryLimit +Dec 13 13:33:58 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60800f459990 holds 0x2121212121212121 instead of 0x60800463e7e0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. +Dec 13 13:33:58 a-mac-with-esc-key syslogd[46]: ASL Sender Statistics +Dec 13 13:34:44 a-mac-with-esc-key com.apple.xpc.launchd[1] (com.apple.quicklook[26381]): Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook +Dec 13 13:35:59 a-mac-with-esc-key Google Chrome[85294]: objc[85294]: __weak variable at 0x60000be429b0 holds 0x2121212121212121 instead of 0x600003c325e0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug. 
+Dec 13 13:36:19 a-mac-with-esc-key GoogleSoftwareUpdateAgent[27321]: 2016-12-13 13:36:19.906 GoogleSoftwareUpdateAgent[27321/0x7fffcc3f93c0] [lvl=2] -[KSAgentApp setupLoggerOutput] Agent settings:
diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/config.go b/vendor/github.com/elastic/beats/filebeat/prospector/config.go
index c73b28d4..1c0e8156 100644
--- a/vendor/github.com/elastic/beats/filebeat/prospector/config.go
+++ b/vendor/github.com/elastic/beats/filebeat/prospector/config.go
@@ -10,6 +10,7 @@ import (
 
 var (
 	defaultConfig = prospectorConfig{
+		Enabled:       true,
 		IgnoreOlder:   0,
 		ScanFrequency: 10 * time.Second,
 		InputType:     cfg.DefaultInputType,
@@ -22,6 +23,7 @@ var (
 )
 
 type prospectorConfig struct {
+	Enabled      bool             `config:"enabled"`
 	ExcludeFiles []*regexp.Regexp `config:"exclude_files"`
 	IgnoreOlder  time.Duration    `config:"ignore_older"`
 	Paths        []string         `config:"paths"`
diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/prospector.go b/vendor/github.com/elastic/beats/filebeat/prospector/prospector.go
index bb4d7c77..57bf0101 100644
--- a/vendor/github.com/elastic/beats/filebeat/prospector/prospector.go
+++ b/vendor/github.com/elastic/beats/filebeat/prospector/prospector.go
@@ -21,6 +21,8 @@ var (
 )
 
 type Prospector struct {
+	// harvesterCounter MUST be first field in struct. See https://github.com/golang/go/issues/599
+	harvesterCounter uint64 // Must be 8-byte aligned. Ensured if first field in struct
 	cfg              *common.Config // Raw config
 	config           prospectorConfig
 	prospectorer     Prospectorer
@@ -30,7 +32,6 @@ type Prospector struct {
 	states           *file.States
 	wg               sync.WaitGroup
 	channelWg        sync.WaitGroup // Separate waitgroup for channels as not stopped on completion
-	harvesterCounter uint64
 }
 
 type Prospectorer interface {
@@ -158,6 +159,11 @@ func (p *Prospector) Run(once bool) {
 	}
 }
 
+// IsEnabled returns true if the prospector is enabled
+func (p *Prospector) IsEnabled() bool {
+	return p.config.Enabled
+}
+
 // updateState updates the prospector state and forwards the event to the spooler
 // All state updates done by the prospector itself are synchronous to make sure not states are overwritten
 func (p *Prospector) updateState(event *input.Event) error {
diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/create_fileset.py b/vendor/github.com/elastic/beats/filebeat/scripts/create_fileset.py
new file mode 100644
index 00000000..31c9c24c
--- /dev/null
+++ b/vendor/github.com/elastic/beats/filebeat/scripts/create_fileset.py
@@ -0,0 +1,103 @@
+import os
+import argparse
+
+# Creates a new fileset with all the necessary files.
+# In case the module does not exist, also the module is created.
+
+
+def generate_fileset(base_path, metricbeat_path, module, fileset):
+
+    generate_module(base_path, metricbeat_path, module, fileset)
+    fileset_path = base_path + "/module/" + module + "/" + fileset
+    meta_path = fileset_path + "/_meta"
+
+    if os.path.isdir(fileset_path):
+        print("Fileset already exists. 
Skipping creating fileset {}" + .format(fileset)) + return + + os.makedirs(meta_path) + os.makedirs(os.path.join(fileset_path, "test")) + + templates = metricbeat_path + "/scripts/module/fileset/" + + content = load_file(templates + "fields.yml", module, fileset) + with open(meta_path + "/fields.yml", "w") as f: + f.write(content) + + os.makedirs(os.path.join(fileset_path, "config")) + content = load_file(templates + "/config/config.yml", module, fileset) + with open("{}/config/{}.yml".format(fileset_path, fileset), "w") as f: + f.write(content) + + os.makedirs(os.path.join(fileset_path, "ingest")) + content = load_file(templates + "/ingest/pipeline.json", module, fileset) + with open("{}/ingest/pipeline.json".format(fileset_path), "w") as f: + f.write(content) + + content = load_file(templates + "/manifest.yml", module, fileset) + with open("{}/manifest.yml".format(fileset_path), "w") as f: + f.write(content) + + print("Fileset {} created.".format(fileset)) + + +def generate_module(base_path, metricbeat_path, module, fileset): + + module_path = base_path + "/module/" + module + meta_path = module_path + "/_meta" + + if os.path.isdir(module_path): + print("Module already exists. Skipping creating module {}" + .format(module)) + return + + os.makedirs(meta_path) + + templates = metricbeat_path + "/scripts/module/" + + content = load_file(templates + "fields.yml", module, "") + with open(meta_path + "/fields.yml", "w") as f: + f.write(content) + + print("Module {} created.".format(module)) + + +def load_file(file, module, fileset): + content = "" + with open(file) as f: + content = f.read() + + return content.replace("{module}", module).replace("{fileset}", fileset) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Creates a fileset") + parser.add_argument("--module", help="Module name") + parser.add_argument("--fileset", help="Fileset name") + + parser.add_argument("--path", help="Beat path") + parser.add_argument("--es_beats", + help="The path to the general beats folder") + + args = parser.parse_args() + + if args.path is None: + args.path = './' + print "Set default path for beat path: " + args.path + + if args.es_beats is None: + args.es_beats = '../' + print "Set default path for es_beats path: " + args.es_beats + + if args.module is None or args.module == '': + args.module = raw_input("Module name: ") + + if args.fileset is None or args.fileset == '': + args.fileset = raw_input("Fileset name: ") + + path = os.path.abspath(args.path) + filebeat_path = os.path.abspath(args.es_beats + "/filebeat") + + generate_fileset(path, filebeat_path, args.module.lower(), + args.fileset.lower()) diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/module/fields.yml b/vendor/github.com/elastic/beats/filebeat/scripts/module/fields.yml new file mode 100644 index 00000000..50d767ec --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/scripts/module/fields.yml @@ -0,0 +1,9 @@ +- key: {module} + title: "{module}" + description: > + {module} Module + fields: + - name: {module} + type: group + description: > + fields: diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/config/config.yml b/vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/config/config.yml new file mode 100644 index 00000000..20122796 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/config/config.yml @@ -0,0 +1,9 @@ +- input_type: log + paths: + {%- for path in paths %} + - {{path}} + {%- endfor %} + exclude_files: [".gz$"] 
+ fields: + source_type: {module}-{fileset} + pipeline_id: {{beat.pipeline_id}} diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/fields.yml b/vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/fields.yml new file mode 100644 index 00000000..d237ec1b --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/fields.yml @@ -0,0 +1,9 @@ +- name: {fileset} + type: group + description: > + {fileset} + fields: + - name: example + type: keyword + description: > + Example field diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/ingest/pipeline.json b/vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/ingest/pipeline.json new file mode 100644 index 00000000..90fde13d --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/ingest/pipeline.json @@ -0,0 +1,11 @@ +{ + "description": "Pipeline for parsing {module} {fileset} logs", + "processors": [ + ], + "on_failure" : [{ + "set" : { + "field" : "error", + "value" : "{{ _ingest.on_failure_message }}" + } + }] +} diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/manifest.yml b/vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/manifest.yml new file mode 100644 index 00000000..2982d637 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/manifest.yml @@ -0,0 +1,14 @@ +module_version: 1.0 + +vars: + paths: + default: + - /example/path* + os.darwin: + - /usr/local/example/path* + os.windows: + - "c:/example/path*" + +ingest_pipeline: ingest/pipeline.json +prospectors: + - config/{fileset}.yml diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/filebeat.py b/vendor/github.com/elastic/beats/filebeat/tests/system/filebeat.py index c6db2731..ebc33f6f 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/filebeat.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/filebeat.py @@ -44,3 +44,14 @@ class BaseTest(TestCase): return tmp_entry + def assert_fields_are_documented(self, evt): + """ + Assert that all keys present in evt are documented in fields.yml. + This reads from the global fields.yml, means `make collect` has to be run before the check. 
+ """ + expected_fields, dict_fields = self.load_fields() + flat = self.flatten_object(evt, dict_fields) + + for key in flat.keys(): + if key not in expected_fields: + raise Exception("Key '{}' found in event is not documented!".format(key)) diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_crawler.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_crawler.py index f56e0c0d..98127419 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_crawler.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_crawler.py @@ -761,6 +761,9 @@ class Test(BaseTest): """ Checks that filebeat handles files without reading permission well """ + if os.name != "nt" and os.geteuid() == 0: + # root ignores permission flags, so we have to skip the test + raise SkipTest self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/*", diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_modules.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_modules.py new file mode 100644 index 00000000..95b84b31 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_modules.py @@ -0,0 +1,100 @@ +from filebeat import BaseTest +from beat.beat import INTEGRATION_TESTS +import os +import unittest +import glob +import subprocess +from elasticsearch import Elasticsearch +import json +import logging + + +class Test(BaseTest): + def init(self): + self.elasticsearch_url = self.get_elasticsearch_url() + print("Using elasticsearch: {}".format(self.elasticsearch_url)) + self.es = Elasticsearch([self.elasticsearch_url]) + logging.getLogger("urllib3").setLevel(logging.WARNING) + logging.getLogger("elasticsearch").setLevel(logging.ERROR) + + self.modules_path = os.path.abspath(self.working_dir + + "/../../../../module") + + self.filebeat = os.path.abspath(self.working_dir + + "/../../../../filebeat.py") + + # @unittest.skipUnless(INTEGRATION_TESTS, "integration test") + @unittest.skip("modules disabled in 5.2") + def test_modules(self): + self.init() + modules = os.getenv("TESTING_FILEBEAT_MODULES") + if modules: + modules = modules.split(",") + else: + modules = os.listdir(self.modules_path) + + for module in modules: + path = os.path.join(self.modules_path, module) + filesets = [name for name in os.listdir(path) if + os.path.isfile(os.path.join(path, name, + "manifest.yml"))] + + for fileset in filesets: + test_files = glob.glob(os.path.join(self.modules_path, module, + fileset, "test", "*.log")) + for test_file in test_files: + self.run_on_file( + module=module, + fileset=fileset, + test_file=test_file) + + def run_on_file(self, module, fileset, test_file): + print("Testing {}/{} on {}".format(module, fileset, test_file)) + + index_name = "test-filebeat-modules" + try: + self.es.indices.delete(index=index_name) + except: + pass + + cmd = [ + self.filebeat, + "--once", + "--modules={}".format(module), + "-M", "{module}.{fileset}.paths={test_file}".format( + module=module, fileset=fileset, test_file=test_file), + "--es", self.elasticsearch_url, + "--index", index_name, + "--registry", self.working_dir + "/registry" + ] + output = open(os.path.join(self.working_dir, "output.log"), "ab") + subprocess.Popen(cmd, + stdin=None, + stdout=output, + stderr=subprocess.STDOUT, + bufsize=0).wait() + + # Make sure index exists + self.wait_until(lambda: self.es.indices.exists(index_name)) + + self.es.indices.refresh(index=index_name) + res = self.es.search(index=index_name, + body={"query": {"match_all": {}}}) + 
objects = [o["_source"] for o in res["hits"]["hits"]] + assert len(objects) > 0 + for obj in objects: + self.assert_fields_are_documented(obj) + + if os.path.exists(test_file + "-expected.json"): + with open(test_file + "-expected.json", "r") as f: + expected = json.load(f) + assert len(expected) == len(objects) + for ev in expected: + found = False + for obj in objects: + if ev["_source"][module] == obj[module]: + found = True + break + if not found: + raise Exception("The following expected object was" + + " not found: {}".format(obj)) diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar.py index 159fe810..04984139 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar.py @@ -682,12 +682,12 @@ class Test(BaseTest): # Wait until states are removed from prospectors self.wait_until( lambda: self.log_contains_count( - "State removed for") == 4, + "State removed for") >= 3, max_timeout=15) filebeat.check_kill_and_wait() - # Check that the first to files were removed from the registry + # Check that the first two files were removed from the registry data = self.get_registry() assert len(data) == 1 @@ -1038,7 +1038,7 @@ class Test(BaseTest): self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/test.log", clean_inactive="20s", - ignore_older="5s" + ignore_older="15s" ) os.mkdir(self.working_dir + "/log/") @@ -1092,7 +1092,7 @@ class Test(BaseTest): self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/test file.log", clean_inactive="20s", - ignore_older="5s" + ignore_older="15s" ) os.mkdir(self.working_dir + "/log/") diff --git a/vendor/github.com/elastic/beats/generate/beat/README.md b/vendor/github.com/elastic/beats/generate/beat/README.md new file mode 100644 index 00000000..03755e95 --- /dev/null +++ b/vendor/github.com/elastic/beats/generate/beat/README.md @@ -0,0 +1,3 @@ +# Creating your own Beat + +The Beat generator enables you to create your own Beat in a few steps. Follow the [developer guide](https://www.elastic.co/guide/en/beats/libbeat/current/new-beat.html) on how to do it. diff --git a/vendor/github.com/elastic/beats/generate/beat/{beat}/NOTICE b/vendor/github.com/elastic/beats/generate/beat/{beat}/NOTICE new file mode 100644 index 00000000..3898d85d --- /dev/null +++ b/vendor/github.com/elastic/beats/generate/beat/{beat}/NOTICE @@ -0,0 +1,5 @@ +{beat} +Copyright 2017 {full_name} + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). diff --git a/vendor/github.com/elastic/beats/generate/beat/{{cookiecutter.beat}}/Makefile b/vendor/github.com/elastic/beats/generate/beat/{{cookiecutter.beat}}/Makefile index 871109b1..283af7a4 100644 --- a/vendor/github.com/elastic/beats/generate/beat/{{cookiecutter.beat}}/Makefile +++ b/vendor/github.com/elastic/beats/generate/beat/{{cookiecutter.beat}}/Makefile @@ -5,6 +5,7 @@ TEST_ENVIRONMENT=false ES_BEATS?=./vendor/github.com/elastic/beats GOPACKAGES=$(shell glide novendor) PREFIX?=. 
+NOTICE_FILE=NOTICE
 
 # Path to the libbeat Makefile
 -include $(ES_BEATS)/libbeat/scripts/Makefile
diff --git a/vendor/github.com/elastic/beats/generate/metricbeat/metricset/README.md b/vendor/github.com/elastic/beats/generate/metricbeat/metricset/README.md
new file mode 100644
index 00000000..556d830e
--- /dev/null
+++ b/vendor/github.com/elastic/beats/generate/metricbeat/metricset/README.md
@@ -0,0 +1,5 @@
+# Creating a Beat based on Metricbeat
+
+The metricset Beat generator enables you to create a Beat that uses Metricbeat as a library and has your
+own metricsets. For more details on how to create your own metricbeat check out the [developer guide](https://www.elastic.co/guide/en/beats/metricbeat/current/creating-beat-from-metricbeat.html).
+
diff --git a/vendor/github.com/elastic/beats/generate/metricbeat/metricset/{{cookiecutter.beat}}/Makefile b/vendor/github.com/elastic/beats/generate/metricbeat/metricset/{{cookiecutter.beat}}/Makefile
index 95d755ce..fcd8688e 100644
--- a/vendor/github.com/elastic/beats/generate/metricbeat/metricset/{{cookiecutter.beat}}/Makefile
+++ b/vendor/github.com/elastic/beats/generate/metricbeat/metricset/{{cookiecutter.beat}}/Makefile
@@ -5,6 +5,7 @@ TEST_ENVIRONMENT=false
 ES_BEATS?=./vendor/github.com/elastic/beats
 GOPACKAGES=$(shell glide novendor)
 PREFIX?=.
+NOTICE_FILE=NOTICE
 
 # Path to the libbeat Makefile
 -include $(ES_BEATS)/metricbeat/Makefile
diff --git a/vendor/github.com/elastic/beats/generate/metricbeat/metricset/{{cookiecutter.beat}}/NOTICE b/vendor/github.com/elastic/beats/generate/metricbeat/metricset/{{cookiecutter.beat}}/NOTICE
new file mode 100644
index 00000000..19a8b48b
--- /dev/null
+++ b/vendor/github.com/elastic/beats/generate/metricbeat/metricset/{{cookiecutter.beat}}/NOTICE
@@ -0,0 +1,5 @@
+{{cookiecutter.beat}}
+Copyright 2017 {{cookiecutter.full_name}}
+
+This product includes software developed by The Apache Software
+Foundation (http://www.apache.org/).
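For orientation, the metricset scaffolding referenced above boils down to a small Go type that registers itself with the Metricbeat registry and implements a Fetch method. The following is a minimal sketch against the Metricbeat API vendored at this revision; the module name "examplemodule", the metricset name "status", and the reported field are illustrative placeholders rather than files added by this change, and API details may differ slightly between Beats versions.

package status

import (
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/metricbeat/mb"
)

// init registers the metricset so Metricbeat can resolve the
// "examplemodule" module and its "status" metricset at startup.
func init() {
	if err := mb.Registry.AddMetricSet("examplemodule", "status", New); err != nil {
		panic(err)
	}
}

// MetricSet holds any state shared between Fetch calls.
type MetricSet struct {
	mb.BaseMetricSet
}

// New creates a MetricSet instance from the user configuration.
func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
	return &MetricSet{BaseMetricSet: base}, nil
}

// Fetch produces one event per configured polling period.
func (m *MetricSet) Fetch() (common.MapStr, error) {
	return common.MapStr{"example": "value"}, nil
}

The generator fills in the {{cookiecutter.*}} placeholders with real names; the Makefile and NOTICE additions above only ensure that a Beat generated this way packages its own NOTICE file.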
diff --git a/vendor/github.com/elastic/beats/glide.yaml b/vendor/github.com/elastic/beats/glide.yaml index d3c53fbc..1adbdb3b 100644 --- a/vendor/github.com/elastic/beats/glide.yaml +++ b/vendor/github.com/elastic/beats/glide.yaml @@ -25,7 +25,9 @@ import: subpackages: - /difflib - package: github.com/elastic/gosigar - version: 0ba14c25cb14e2d939b563ca49a504c3117680c6 + version: 171a3c9e31dde9688c154ba94be6cd5d8a78bf64 +- package: github.com/elastic/procfs + version: abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 - package: github.com/samuel/go-parser version: ca8abbf65d0e61dedf061f98bd3850f250e27539 - package: github.com/samuel/go-thrift @@ -62,7 +64,7 @@ import: - package: github.com/miekg/dns version: 5d001d020961ae1c184f9f8152fdc73810481677 - package: github.com/Shopify/sarama - version: enh/offset-replica-id + version: v1.11/enh/offset-replica-id repo: https://github.com/urso/sarama - package: github.com/rcrowley/go-metrics version: ab2277b1c5d15c3cba104e9cbddbdfc622df5ad8 @@ -81,7 +83,7 @@ import: - package: github.com/dustin/go-humanize version: 499693e27ee0d14ffab67c31ad065fdb3d34ea75 - package: github.com/elastic/go-ucfg - version: v0.3.7 + version: v0.4.5 - package: github.com/armon/go-socks5 version: 3a873e99f5400ad7706e464e905ffcc94b3ff247 - package: github.com/pkg/errors @@ -107,3 +109,7 @@ import: version: v0.9.0 - package: github.com/gorhill/cronexpr version: f0984319b44273e83de132089ae42b1810f4933b +- package: github.com/pierrec/lz4 + version: 5c9560bfa9ace2bf86080bf40d46b34ae44604df +- package: github.com/pierrec/xxHash + version: 5a004441f897722c627870a981d02b29924215fa diff --git a/vendor/github.com/elastic/beats/heartbeat/Makefile b/vendor/github.com/elastic/beats/heartbeat/Makefile index 30fdff6f..a8fffb14 100644 --- a/vendor/github.com/elastic/beats/heartbeat/Makefile +++ b/vendor/github.com/elastic/beats/heartbeat/Makefile @@ -1,5 +1,5 @@ BEATNAME=heartbeat -BEAT_DESCRIPTION?=Ping remote services for availablity and log results to Elasticsearch or send to Logstash. +BEAT_DESCRIPTION?=Ping remote services for availability and log results to Elasticsearch or send to Logstash. SYSTEM_TESTS=false TEST_ENVIRONMENT=false diff --git a/vendor/github.com/elastic/beats/heartbeat/_meta/beat.full.yml b/vendor/github.com/elastic/beats/heartbeat/_meta/beat.full.yml new file mode 100644 index 00000000..223baec7 --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/_meta/beat.full.yml @@ -0,0 +1,198 @@ +################### Heartbeat Configuration Example ######################### + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains +# only some common options, please see heartbeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/heartbeat/index.html + +############################# Heartbeat ###################################### + +# Configure monitors +heartbeat.monitors: +- type: icmp # monitor type `icmp` (requires root) uses ICMP Echo Request to ping + # configured hosts + + # Monitor name used for job name and document type. + #name: icmp + + # Enable/Disable monitor + #enabled: true + + # Configure task schedule using cron-like syntax + schedule: '*/5 * * * * * *' # exactly every 5 seconds like 10:00:00, 10:00:05, ... + + # List of hosts to ping + hosts: ["localhost"] + + # Configure IP protocol types to ping on if hostnames are configured. 
+ # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. + ipv4: true + ipv6: true + mode: any + + # Configure file json file to be watched for changes to the monitor: + #watch.poll_file: + # Path to check for updates. + #path: + + # Interval between file file changed checks. + #interval: 5s + + # Total running time per ping test. + timeout: 16s + + # Waiting duration until another ICMP Echo Request is emitted. + wait: 1s + +- type: tcp # monitor type `tcp`. Connect via TCP and optionally verify endpoint + # by sending/receiving a custom payload + + # Monitor name used for job name and document type + #name: tcp + + # Enable/Disable monitor + #enabled: true + + # Configure task schedule + schedule: '@every 5s' # every 5 seconds from start of beat + + # configure hosts to ping. + # Entries can be: + # - plain host name or IP like `localhost`: + # Requires ports configs to be checked. If ssl is configured, + # a SSL/TLS based connection will be established. Otherwise plain tcp connection + # will be established + # - hostname + port like `localhost:12345`: + # Connect to port on given host. If ssl is configured, + # a SSL/TLS based connection will be established. Otherwise plain tcp connection + # will be established + # - full url syntax. `scheme://:[port]`. The `` can be one of + # `tcp`, `plain`, `ssl` and `tls`. If `tcp`, `plain` is configured, a plain + # tcp connection will be established, even if ssl is configured. + # Using `tls`/`ssl`, an SSL connection is established. If no ssl is configured, + # system defaults will be used (not supported on windows). + # If `port` is missing in url, the ports setting is required. + hosts: ["localhost:9200"] + + # Configure IP protocol types to ping on if hostnames are configured. + # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. + ipv4: true + ipv6: true + mode: any + + # Configure file json file to be watched for changes to the monitor: + #watch.poll_file: + # Path to check for updates. + #path: + + # Interval between file file changed checks. + #interval: 5s + + # List of ports to ping if host does not contain a port number + # ports: [80, 9200, 5044] + + # Total test connection and data exchange timeout + #timeout: 16s + + # Optional payload string to send to remote and expected answer. If none is + # configured, the endpoint is expected to be up if connection attempt was + # successful. If only `send_string` is configured, any response will be + # accepted as ok. If only `receive_string` is configured, no payload will be + # send, but client expects to receive expected payload on connect. + #check: + #send: '' + #receive: '' + + # SOCKS5 proxy url + # proxy_url: '' + + # Resolve hostnames locally instead on SOCKS5 server: + #proxy_use_local_resolver: false + + # TLS/SSL connection settings: + #ssl: + # Certificate Authorities + #certificate_authorities: [''] + + # Required TLS protocols + #supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"] + +- type: http # monitor type `http`. Connect via HTTP an optionally verify response + + # Monitor name used for job name and document type + #name: http + + # Enable/Disable monitor + #enabled: true + + # Configure task schedule + schedule: '@every 5s' # every 5 seconds from start of beat + + # Configure URLs to ping + urls: ["http://localhost:9200"] + + # Configure IP protocol types to ping on if hostnames are configured. + # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. 
+ ipv4: true + ipv6: true + mode: any + + # Configure file json file to be watched for changes to the monitor: + #watch.poll_file: + # Path to check for updates. + #path: + + # Interval between file file changed checks. + #interval: 5s + + # Optional HTTP proxy url. If not set HTTP_PROXY environment variable will be used. + #proxy_url: '' + + # Total test connection and data exchange timeout + #timeout: 16s + + # Optional Authentication Credentials + #username: '' + #password: '' + + # TLS/SSL connection settings for use with HTTPS endpoint. If not configured + # system defaults will be used. + #ssl: + # Certificate Authorities + #certificate_authorities: [''] + + # Required TLS protocols + #supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"] + + # Request settings: + #check.request: + # Configure HTTP method to use. Only 'HEAD', 'GET' and 'POST' methods are allowed. + #method: "GET" + + # Dictionary of additional HTTP headers to send: + #headers: + + # Optional request body content + #body: + + # Expected response settings + #check.response: + # Expected status code. If not configured or set to 0 any status code not + # being 404 is accepted. + #status: 0 + + # Required response headers. + #headers: + + # Required response contents. + #body: + +heartbeat.scheduler: + # Limit number of concurrent tasks executed by heartbeat. The task limit if + # disabled if set to 0. The default is 0. + #limit: 0 + + # Set the scheduler it's timezone + #location: '' diff --git a/vendor/github.com/elastic/beats/heartbeat/_meta/beat.yml b/vendor/github.com/elastic/beats/heartbeat/_meta/beat.yml index 740eefda..b6347419 100644 --- a/vendor/github.com/elastic/beats/heartbeat/_meta/beat.yml +++ b/vendor/github.com/elastic/beats/heartbeat/_meta/beat.yml @@ -1,191 +1,23 @@ ################### Heartbeat Configuration Example ######################### +# This file is an example configuration file highlighting only some common options. +# The heartbeat.full.yml file in the same directory contains all the supported options +# with detailed comments. You can use it for reference. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/heartbeat/index.html + ############################# Heartbeat ###################################### # Configure monitors heartbeat.monitors: -- type: icmp # monitor type `icmp` (requires root) uses ICMP Echo Request to ping - # configured hosts +- type: http - # Monitor name used for job name and document type. - #name: icmp - - # Enable/Disable monitor - #enabled: true - - # Configure task schedule using cron-like syntax - schedule: '*/5 * * * * * *' # exactly every 5 seconds like 10:00:00, 10:00:05, ... - - # List of hosts to ping - hosts: ["localhost"] - - # Configure IP protocol types to ping on if hostnames are configured. - # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. - ipv4: true - ipv6: true - mode: any - - # Configure file json file to be watched for changes to the monitor: - #watch.poll_file: - # Path to check for updates. - #path: - - # Interval between file file changed checks. - #interval: 5s - - # Total running time per ping test. - timeout: 16s - - # Waiting duration until another ICMP Echo Request is emitted. - wait: 1s - -- type: tcp # monitor type `tcp`. 
Connect via TCP and optionally verify endpoint - # by sending/receiving a custom payload - - # Monitor name used for job name and document type - #name: tcp - - # Enable/Disable monitor - #enabled: true + # List or urls to query + urls: ["http://localhost:9200"] # Configure task schedule - schedule: '@every 5s' # every 5 seconds from start of beat - - # configure hosts to ping. - # Entries can be: - # - plain host name or IP like `localhost`: - # Requires ports configs to be checked. If ssl is configured, - # a SSL/TLS based connection will be established. Otherwise plain tcp connection - # will be established - # - hostname + port like `localhost:12345`: - # Connect to port on given host. If ssl is configured, - # a SSL/TLS based connection will be established. Otherwise plain tcp connection - # will be established - # - full url syntax. `scheme://:[port]`. The `` can be one of - # `tcp`, `plain`, `ssl` and `tls`. If `tcp`, `plain` is configured, a plain - # tcp connection will be established, even if ssl is configured. - # Using `tls`/`ssl`, an SSL connection is established. If no ssl is configured, - # system defaults will be used (not supported on windows). - # If `port` is missing in url, the ports setting is required. - hosts: ["localhost:12345"] - - # Configure IP protocol types to ping on if hostnames are configured. - # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. - ipv4: true - ipv6: true - mode: any - - # Configure file json file to be watched for changes to the monitor: - #watch.poll_file: - # Path to check for updates. - #path: - - # Interval between file file changed checks. - #interval: 5s - - # List of ports to ping if host does not contain a port number - # ports: [80, 9200, 5044] + schedule: '@every 10s' # Total test connection and data exchange timeout #timeout: 16s - - # Optional payload string to send to remote and expected answer. If none is - # configured, the endpoint is expected to be up if connection attempt was - # successful. If only `send_string` is configured, any response will be - # accepted as ok. If only `receive_string` is configured, no payload will be - # send, but client expects to receive expected payload on connect. - #check: - #send: '' - #receive: '' - - # SOCKS5 proxy url - # proxy_url: '' - - # Resolve hostnames locally instead on SOCKS5 server: - #proxy_use_local_resolver: false - - # TLS/SSL connection settings: - #ssl: - # Certificate Authorities - #certificate_authorities: [''] - - # Required TLS protocols - #supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"] - -- type: http # monitor type `http`. Connect via HTTP an optionally verify response - - # Monitor name used for job name and document type - #name: tcp - - # Enable/Disable monitor - #enabled: true - - # Configure task schedule - schedule: '@every 5s' # every 5 seconds from start of beat - - # Configure URLs to ping - urls: ["http://localhost:80"] - - # Configure IP protocol types to ping on if hostnames are configured. - # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. - ipv4: true - ipv6: true - mode: any - - # Configure file json file to be watched for changes to the monitor: - #watch.poll_file: - # Path to check for updates. - #path: - - # Interval between file file changed checks. - #interval: 5s - - # Optional HTTP proxy url. If not set HTTP_PROXY environment variable will be used. 
- #proxy_url: '' - - # Total test connection and data exchange timeout - #timeout: 16s - - # Optional Authentication Credentials - #username: '' - #password: '' - - # TLS/SSL connection settings for use with HTTPS endpoint. If not configured - # system defaults will be used. - #ssl: - # Certificate Authorities - #certificate_authorities: [''] - - # Required TLS protocols - #supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"] - - # Request settings: - #check.request: - # Configure HTTP method to use. Only 'HEAD', 'GET' and 'POST' methods are allowed. - #method: "GET" - - # Dictionary of additional HTTP headers to send: - #headers: - - # Optional request body content - #body: - - # Expected response settings - #check.response: - # Expected status code. If not configured or set to 0 any status code not - # being 404 is accepted. - #status: 0 - - # Required response headers. - #headers: - - # Required response contents. - #body: - -heartbeat.scheduler: - # Limit number of concurrent tasks executed by heartbeat. The task limit if - # disabled if set to 0. The default is 0. - #limit: 0 - - # Set the scheduler it's timezone - #location: '' diff --git a/vendor/github.com/elastic/beats/heartbeat/beater/heartbeat.go b/vendor/github.com/elastic/beats/heartbeat/beater/heartbeat.go index 81708f52..39cd99e2 100644 --- a/vendor/github.com/elastic/beats/heartbeat/beater/heartbeat.go +++ b/vendor/github.com/elastic/beats/heartbeat/beater/heartbeat.go @@ -23,6 +23,8 @@ type Heartbeat struct { } func New(b *beat.Beat, cfg *common.Config) (beat.Beater, error) { + logp.Warn("Beta: Heartbeat is beta software") + config := config.DefaultConfig if err := cfg.Unpack(&config); err != nil { return nil, fmt.Errorf("Error reading config file: %v", err) diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/command-line.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/command-line.asciidoc new file mode 100644 index 00000000..e04b9337 --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/docs/command-line.asciidoc @@ -0,0 +1,15 @@ +[[heartbeat-command-line]] +=== Command Line Options + +Heartbeat does not have any Heartbeat-specific command line options. +Instead, you configure the behaviour of Heartbeat by specifying options in +the configuration file. + +The following command line options from libbeat are also available for +Heartbeat. To use these options, you need to start Heartbeat in the +foreground. + +TIP: Run `./heartbeat -h` to see the full list of options from the command +line. + +include::../../libbeat/docs/shared-command-line.asciidoc[] diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/configuring-howto.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/configuring-howto.asciidoc new file mode 100644 index 00000000..8ca477ce --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/docs/configuring-howto.asciidoc @@ -0,0 +1,30 @@ +[[configuring-howto-heartbeat]] += Configuring Heartbeat + +[partintro] +-- +After following the <> in the +Getting Started, you might want to fine tune the behavior of Heartbeat. +This section describes some common use cases for changing configuration options. + +To configure Heartbeat, you edit the configuration file. For rpm and deb, +you’ll find the default configuration file at ++/etc/heartbeat/heartbeat.yml+. There's also a full example +configuration file at +/etc/heartbeat/heartbeat.full.yml+ that shows +all non-deprecated options. For mac and win, look in the archive that you +extracted. 
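
For orientation, a minimal configuration of this kind might look like the following sketch; the monitor, schedule, and output host values are illustrative placeholders rather than required defaults:

[source,yaml]
----------------------------------------------------------------------
# Minimal sketch of a heartbeat.yml: one monitor plus an output.
heartbeat.monitors:
- type: http                       # check an HTTP endpoint
  schedule: '@every 10s'           # run the check every 10 seconds
  urls: ["http://localhost:9200"]  # endpoint(s) to probe
output.elasticsearch:
  hosts: ["localhost:9200"]        # where Heartbeat ships its events
----------------------------------------------------------------------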
+ +The following topics describe how to configure Heartbeat: + +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +-- + +include::reference/configuration.asciidoc[] diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/configuring-logstash.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/configuring-logstash.asciidoc new file mode 100644 index 00000000..4da66693 --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/docs/configuring-logstash.asciidoc @@ -0,0 +1,5 @@ +[[config-heartbeat-logstash]] +== Configuring Heartbeat to Use Logstash + +:allplatforms: +include::../../libbeat/docs/shared-logstash-config.asciidoc[] diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/faq.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/faq.asciidoc new file mode 100644 index 00000000..ca89bb4d --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/docs/faq.asciidoc @@ -0,0 +1,9 @@ +[[faq]] +== Frequently Asked Questions + +This section contains frequently asked questions about Heartbeat. Also check +out the +https://discuss.elastic.co/c/beats/heartbeat[Heartbeat discussion forum]. + +include::../../libbeat/docs/faq-limit-bandwidth.asciidoc[] +include::../../libbeat/docs/shared-faq.asciidoc[] diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/getting-started.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/getting-started.asciidoc new file mode 100644 index 00000000..2a483b0c --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/docs/getting-started.asciidoc @@ -0,0 +1,231 @@ +[[heartbeat-getting-started]] +== Getting Started With Heartbeat + +To get started with your own Heartbeat setup, install and configure these +related products: + + * Elasticsearch for storing and indexing the data. + * Kibana for the UI. + * Logstash (optional) for inserting data into Elasticsearch. + +See {libbeat}/getting-started.html[Getting Started with Beats and the Elastic Stack] +for more information. + +After installing the Elastic Stack, read the following topics to learn how to +install, configure, and run Heartbeat: + +* <> +* <> +* <> +* <> +* <> +* <> + + +[[heartbeat-installation]] +=== Step 1: Installing Heartbeat + +Unlike most Beats, which you install on edge nodes, you typically install +Heartbeat as part of a monitoring service that runs on a separate machine +and possibly even outside of the network where the services that you want to +monitor are running. + +//TODO: Add a separate topic that explores deployment scenarios in more detail (like installing on a sub-network where there's a firewall, etc.) + +To download and install Heartbeat, use the commands that work with your +system (<> for Debian/Ubuntu, <> for Redhat/Centos/Fedora, +<> for OS X, and <> for Windows). + +[NOTE] +================================================== +If you use Apt or Yum, you can <> to update to the newest version more easily. + +See our https://www.elastic.co/downloads/beats/heartbeat[download page] for other installation options, such as 32-bit images. 
+ +================================================== + +[[deb]] +*deb:* + +["source","sh",subs="attributes"] +---------------------------------------------------------------------- +curl -L -O {downloads}/heartbeat/heartbeat-{version}-amd64.deb +sudo dpkg -i heartbeat-{version}-amd64.deb +---------------------------------------------------------------------- + +[[rpm]] +*rpm:* + +["source","sh",subs="attributes"] +---------------------------------------------------------------------- +curl -L -O {downloads}/heartbeat/heartbeat-{version}-x86_64.rpm +sudo rpm -vi heartbeat-{version}-x86_64.rpm +---------------------------------------------------------------------- + +[[mac]] +*mac:* + +["source","sh",subs="attributes"] +------------------------------------------------ +curl -L -O {downloads}/heartbeat/heartbeat-{version}-darwin-x86_64.tar.gz +tar xzvf heartbeat-{version}-darwin-x86_64.tar.gz +------------------------------------------------ + +[[win]] +*win:* + +. Download the Heartbeat Windows zip file from the +https://www.elastic.co/downloads/beats/heartbeat[downloads page]. + +. Extract the contents of the zip file into `C:\Program Files`. + +. Rename the +heartbeat--windows+ directory to +Heartbeat+. + +. Open a PowerShell prompt as an Administrator (right-click the PowerShell icon +and select *Run As Administrator*). If you are running Windows XP, you may need +to download and install PowerShell. + +. Run the following commands to install Heartbeat as a Windows service: ++ +["source","sh",subs="attributes"] +---------------------------------------------------------------------- +PS > cd 'C:\Program Files\Heartbeat' +PS C:\Program Files\Heartbeat> .\install-service-heartbeat.ps1 +---------------------------------------------------------------------- + +NOTE: If script execution is disabled on your system, you need to set the +execution policy for the current session to allow the script to run. For +example: ++PowerShell.exe -ExecutionPolicy UnRestricted -File .\install-service-heartbeat.ps1+. + +Before starting Heartbeat, you should look at the configuration options in +the configuration file, for example +C:\Program Files\Heartbeat\heartbeat.yml+ +or +/etc/heartbeat/heartbeat.yml+. For more information about these +options, see <>. + +[[heartbeat-configuration]] +=== Step 2: Configuring Heartbeat + +To configure Heartbeat, you edit the configuration file. For rpm and deb, +you'll find the configuration file at +/etc/heartbeat/heartbeat.yml+. +For mac and win, look in the archive that you just extracted. There’s also a +full example configuration file called `heartbeat.full.yml` that shows all +non-deprecated options. + +Heartbeat provides monitors to check the status of hosts at set intervals. +You configure each monitor individually. Heartbeat currently provides monitors +for ICMP, TCP, and HTTP (see <> for more about these +monitors). Here is an example that configures Heartbeat to use an `icmp` +monitor: + +[source,yaml] +---------------------------------------------------------------------- +heartbeat.monitors: +- type: icmp + schedule: '*/5 * * * * * *' + hosts: ["myhost"] +output.elasticsearch: + hosts: ["myhost:9200"] +---------------------------------------------------------------------- + +To configure Heartbeat: + +. Specify the list of monitors that you want to enable. Each item in the list +begins with a dash (-). 
The following example configures Heartbeat to use two +monitors, an `icmp` monitor and a `tcp` monitor: ++ +[source,yaml] +---------------------------------------------------------------------- +heartbeat.monitors: +- type: icmp + schedule: '*/5 * * * * * *' <1> + hosts: ["myhost"] +- type: tcp + schedule: '@every 5s' <2> + hosts: ["myhost:12345"] + mode: any <3> +---------------------------------------------------------------------- +<1> The `icmp` monitor is scheduled to run exactly every 5 seconds (10:00:00, +10:00:05, and so on). The `schedule` option uses a cron-like syntax based on +https://github.com/gorhill/cronexpr#implementation[this `cronexpr` implementation]. +<2> The `tcp` monitor is set to run every 5 seconds from the time when Heartbeat +was started. Heartbeat adds the `@every` keyword to the syntax provided by the +`cronexpr` package. +<3> The `mode` specifies whether to ping one IP (`any`) or all resolvable IPs +(`all`). ++ +See <> for a full description of each +configuration option. + +. If you are sending output to Elasticsearch, set the IP address and port where +Heartbeat can find the Elasticsearch installation: ++ +[source,yaml] +---------------------------------------------------------------------- +output.elasticsearch: + hosts: ["192.168.1.42:9200"] +---------------------------------------------------------------------- ++ +If you are sending output to Logstash, see <> +instead. + +TIP: To test your configuration file, change to the directory where the +Heartbeat binary is installed, and run Heartbeat in the foreground with +the following options specified: +./heartbeat -configtest -e+. Make sure +your config files are in the path expected by Heartbeat +(see <>). If you installed from DEB or RPM packages, run ++./heartbeat.sh -configtest -e+. + +[[heartbeat-template]] +=== Step 3: Loading the Index Template in Elasticsearch + +:allplatforms: +include::../../libbeat/docs/shared-template-load.asciidoc[] + +[[heartbeat-starting]] +=== Step 4: Starting Heartbeat + +Start Heartbeat by issuing the appropriate command for your platform. + +NOTE: If you use an init.d script to start Heartbeat on deb or rpm, you can't +specify command line flags (see <>). To specify flags, +start Heartbeat in the foreground. + +*deb:* + +["source","sh",subs="attributes"] +---------------------------------------------------------------------- +sudo /etc/init.d/ start +---------------------------------------------------------------------- + +*rpm:* + +["source","sh",subs="attributes"] +---------------------------------------------------------------------- +sudo /etc/init.d/heartbeat start +---------------------------------------------------------------------- + +*mac:* + +["source","sh",subs="attributes"] +---------------------------------------------------------------------- +sudo ./heartbeat -e -c heartbeat.yml -d "publish" +---------------------------------------------------------------------- + +*win:* + +["source","sh",subs="attributes"] +---------------------------------------------------------------------- +PS C:\Program Files\Heartbeat> Start-Service heartbeat +---------------------------------------------------------------------- + +By default, Windows log files are stored in +C:\ProgramData\heartbeat\Logs+. + +Heartbeat is now ready to check the status of your services and send +events to your defined output. + +//TODO: Add content about sample dashboards when the dashboards are available. 
+ +//:allplatforms: + +//include::../../libbeat/docs/dashboards.asciidoc[] diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-devguide.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-devguide.asciidoc new file mode 100644 index 00000000..137ce0aa --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-devguide.asciidoc @@ -0,0 +1,27 @@ +[[heartbeat-developer-guide]] += Heartbeat Developer Guide + +[partintro] +-- + +Heartbeat pings your remote services periodically and determines whether they +are available. As a developer, you can use Heartbeat in two different ways: + +* Extend Heartbeat directly +* Create your own Beat and use Heartbeat as a library + +We recommend that you start by creating your own Beat to keep the development of +your own monitors independent of Heartbeat. At a later stage, if you decide to +add a monitor to Heartbeat, you can reuse the code without making additional changes. + +The following topics describe how to contribute to Heartbeat by adding new monitors +and new Beats based on Heartbeat: + +-- + +[[heartbeat-dev-overview]] +== Overview + +Heartbeat consists of monitors that.... + +//TODO: Add developer guide content when Heartbeat is further along \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-filtering.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-filtering.asciidoc new file mode 100644 index 00000000..2f3caa29 --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-filtering.asciidoc @@ -0,0 +1,4 @@ +[[filtering-and-enhancing-data]] +== Filtering and Enhancing the Exported Data + +include::../../libbeat/docs/processors.asciidoc[] diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/how-heartbeat-works.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/how-heartbeat-works.asciidoc new file mode 100644 index 00000000..cbaf7b74 --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/docs/how-heartbeat-works.asciidoc @@ -0,0 +1,10 @@ +[[how-heartbeat-works]] +== How Heartbeat Works + +In this topic, you learn about the key building blocks of Heartbeat and how +they work together. Understanding these concepts will help you make informed +decisions about configuring Heartbeat for specific use cases. + +//TODO: Add details to this topic when the architecture is more stable. + + diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/images/kibana-created-indexes.png b/vendor/github.com/elastic/beats/heartbeat/docs/images/kibana-created-indexes.png new file mode 100644 index 00000000..17949915 Binary files /dev/null and b/vendor/github.com/elastic/beats/heartbeat/docs/images/kibana-created-indexes.png differ diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/images/kibana-navigation-vis.png b/vendor/github.com/elastic/beats/heartbeat/docs/images/kibana-navigation-vis.png new file mode 100644 index 00000000..d80f3505 Binary files /dev/null and b/vendor/github.com/elastic/beats/heartbeat/docs/images/kibana-navigation-vis.png differ diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/index.asciidoc index 60df3049..8470e621 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/index.asciidoc @@ -1,5 +1,59 @@ -= Heartbeat Docs += Heartbeat Reference -Welcome to the Heartbeat documentation. 
+include::../../libbeat/docs/version.asciidoc[] +:libbeat: http://www.elastic.co/guide/en/beats/libbeat/{doc-branch} +:packetbeat: http://www.elastic.co/guide/en/beats/packetbeat/{doc-branch} +:metricbeat: http://www.elastic.co/guide/en/beats/metricbeat/{doc-branch} +:filebeat: http://www.elastic.co/guide/en/beats/filebeat/{doc-branch} +:winlogbeat: http://www.elastic.co/guide/en/beats/winlogbeat/{doc-branch} +:elasticsearch: https://www.elastic.co/guide/en/elasticsearch/reference/{doc-branch} +:securitydoc: https://www.elastic.co/guide/en/x-pack/5.2 +:downloads: https://artifacts.elastic.co/downloads/beats +:version: {stack-version} +:beatname_lc: heartbeat +:beatname_uc: Heartbeat +:security: X-Pack Security +include::./overview.asciidoc[] + +include::./getting-started.asciidoc[] + +include::./command-line.asciidoc[] + +include::../../libbeat/docs/shared-directory-layout.asciidoc[] + +include::../../libbeat/docs/repositories.asciidoc[] + +// +//include::./upgrading.asciidoc[] + +// +//include::./how-heartbeat-works.asciidoc[] + +include::./configuring-howto.asciidoc[] + +include::./heartbeat-filtering.asciidoc[] + +include::../../libbeat/docs/shared-config-ingest.asciidoc[] + +//points to shared topic because configuring-logstash.asciidoc is just a wrapper +include::./configuring-logstash.asciidoc[] + +include::../../libbeat/docs/shared-env-vars.asciidoc[] + +:allplatforms: +include::../../libbeat/docs/yaml.asciidoc[] + +include::../../libbeat/docs/regexp.asciidoc[] + +include::./fields.asciidoc[] + +include::./securing-heartbeat.asciidoc[] + +include::./troubleshooting.asciidoc[] + +include::./faq.asciidoc[] + +// +//include::./heartbeat-devguide.asciidoc[] diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/overview.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/overview.asciidoc new file mode 100644 index 00000000..a259c46b --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/docs/overview.asciidoc @@ -0,0 +1,34 @@ +[[heartbeat-overview]] +== Overview + +Heartbeat is a lightweight daemon that you install on a remote server +to periodically check the status of your services and determine whether they are +available. Unlike {metricbeat}/index.html[Metricbeat], which only tells you if +your servers are up or down, Heartbeat tells you whether your services are +reachable. + +Heartbeat is useful when you need to verify that you're meeting your service +level agreements for service uptime. It's also useful for other scenarios, such +as security use cases, when you need to verify that no one from the outside can +access services on your private enterprise server. + +You can configure Heartbeat to ping all DNS-resolvable IP addresses for a +specified hostname. That way, you can check all services that are load-balanced +to see if they are available. + +When you configure Heartbeat, you specify monitors that identify the +hostnames that you want to check. Each monitor runs based on the schedule that +you specify. For example, you can configure one monitor to run every 10 +minutes, and a different monitor to run between the hours of 9:00 and 17:00. + +Heartbeat currently supports monitors for checking hosts via: + +* ICMP (v4 and v6) Echo Requests. Use the `icmp` monitor when you simply want to +check whether a service is available. This monitor requires root access. +* TCP. Use the `tcp` monitor to connect via TCP. You can optionally configure this +monitor to verify the endpoint by sending and/or receiving a custom payload. +* HTTP. 
Use the `http` monitor to connect via HTTP. You can optionally configure +this monitor to verify that the service returns the expected response, such as a +specific status code, response header, or content. + +The `tcp` and `http` monitors both support SSL/TLS and some proxy settings. diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/page_header.html b/vendor/github.com/elastic/beats/heartbeat/docs/page_header.html new file mode 100644 index 00000000..6bb0e637 --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/docs/page_header.html @@ -0,0 +1,4 @@ +This functionality is in beta and is subject to change. The design and +code is considered to be less mature than official GA features. Elastic will +take a best effort approach to fix any issues, but beta features are not +subject to the support SLA of official GA features. diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/reference/configuration.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/reference/configuration.asciidoc new file mode 100644 index 00000000..3ff96eb9 --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/docs/reference/configuration.asciidoc @@ -0,0 +1,29 @@ + +[[heartbeat-configuration-details]] +== Configuration Options (Reference) + +Before modifying configuration settings, make sure you've completed the +<> in the Getting Started. + +The Heartbeat configuration file, +heartbeat.yml+, uses http://yaml.org/[YAML] for its syntax. +The configuration options are described in the following sections. After changing +configuration settings, you need to restart Heartbeat to pick up the changes. + +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +include::configuration/heartbeat-options.asciidoc[] + + + + diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/reference/configuration/heartbeat-options.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/reference/configuration/heartbeat-options.asciidoc new file mode 100644 index 00000000..c732beef --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/docs/reference/configuration/heartbeat-options.asciidoc @@ -0,0 +1,458 @@ +[[configuration-heartbeat-options]] +=== Heartbeat Configuration + +The `heartbeat` section of the +heartbeat.yml+ config file specifies the +list of `monitors` that Heartbeat uses to check your remote hosts to +determine if they are available. Each `monitor` item begins with a dash (-) and +specifies the type of monitor to use, the hosts to check, and other settings +that control Heartbeat behavior. + +The following example configures three monitors, `icmp`, `tcp`, and `http`, and +demonstrates how to use TCP Echo and HTTP response verification: + +[source,yaml] +---------------------------------------------------------------------- +heartbeat.monitors: +- type: icmp + schedule: '*/5 * * * * * *' + hosts: ["myhost"] +- type: tcp + schedule: '@every 5s' + hosts: ["myhost:7"] # default TCP Echo Protocol + check.send: "Check" + check.receive: "Check" +- type: http + schedule: '@every 5s' + urls: ["http://localhost:80/service/status"] + check.response.status: 200 +heartbeat.scheduler: + limit: 10 +---------------------------------------------------------------------- + +[[monitor-options]] +==== Monitor Options + +You can specify the following options in the `monitors` section of the ++heartbeat.yml+ config file. These options are the same for all monitors. +Each monitor type has additional configuration options that are specific to that +monitor type. 
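
As a quick illustration, a single monitor entry could combine the shared options described in this section as in the following sketch; the monitor name, host, and schedule are placeholders:

[source,yaml]
----------------------------------------------------------------------
heartbeat.monitors:
- type: icmp                   # monitor type: icmp, tcp, or http
  name: gateway-ping           # job name used in the published events
  enabled: true                # monitors are enabled by default
  schedule: '*/5 * * * * * *'  # cron-like schedule; '@every 5s' also works
  hosts: ["myhost"]
  ipv4: true                   # ping resolved IPv4 addresses
  ipv6: true                   # ping resolved IPv6 addresses
  mode: any                    # 'any' pings one resolved IP, 'all' pings every IP
  timeout: 16s                 # total time allowed per check
----------------------------------------------------------------------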
+ + +[[monitor-type]] +===== type + +The type of monitor to run. One of: + +* `icmp`: Uses an ICMP (v4 and v6) Echo Request to ping the configured hosts. +Requires root access. See <>. +* `tcp`: Connects via TCP and optionally verifies the endpoint by sending and/or +receiving a custom payload. See <>. +* `http`: Connects via HTTP and optionally verifies that the host returns the +expected response. See <>. + +The `tcp` and `http` monitor types both support SSL/TLS and some proxy +settings. + + +[[monitor-name]] +===== name + +The monitor name. This value appears in the <> +under the `monitor` field as the job name and the `type` field as the job type. + +[[monitor-enabled]] +===== enabled + +A Boolean value that specifies whether the monitor is enabled. If the `enabled` +option is missing from the configuration block, the monitor is enabled by +default. + +[[monitor-schedule]] +===== schedule + +A cron-like expression that specifies the task schedule. For example: + +* `*/5 * * * * * *` runs the task every 5 seconds (for example, at 10:00:00, +10:00:05, and so on). +* `@every 5s` runs the task every 5 seconds from the time when Heartbeat was +started. + +The `schedule` option uses a cron-like syntax based on https://github.com/gorhill/cronexpr#implementation[this `cronexpr` implementation], +but adds the `@every` keyword. + +[[monitor-ipv4]] +===== ipv4 + +A Boolean value that specifies whether to ping using the ipv4 protocol if +hostnames are configured. The default is `true`. + +[[monitor-ipv6]] +===== ipv6 + +A Boolean value that specifies whether to ping using the ipv6 protocol +if hostnames are configured. The default is `true`. + +[[monitor-mode]] +===== mode + +If `mode` is `any`, the monitor pings only one IP address for a hostname. If +`mode` is `all`, the monitor pings all resolvable IPs for a hostname. The +`mode: all` setting is useful if you are using a DNS load balancer and want to +ping every IP address for the specified hostname. The default is `any`. + +[[monitor-timeout]] +===== timeout + +The total running time for each ping test. This is the total time allowed for +testing the connection and exchanging data. The default is 16 seconds (16s). + +If the timeout is exceeded, Heartbeat publishes a `service-down` event. If the +value specified for `timeout` is greater than `schedule`, intermediate checks +will not be executed by the scheduler. + +[[monitor-watch-poll-file]] +===== watch.poll_file + +experimental[] + +The JSON file to watch for additional monitor configurations. The JSON file can +contain multiple objects, each of which specifies a different monitor config. +Heartbeat checks this file periodically and starts a new monitor instance for +each new JSON object added to the file. For example, imagine that you add +10 new entries to the JSON file, each for a different hostname. When Heartbeat +picks up the changes in the file, it merges the original config +(`heartbeat.yml`) plus the JSON objects, and starts a monitor for each new host +that you've configured. If you delete an object from the JSON file and it +doesn't exist in the main config, Heartbeat stops the monitor instance running +for that object. + +Each monitor has a unique ID that's based on parameters like protocol, host, +and port. If two monitors have the same ID, Heartbeat uses the settings that +are defined in the last JSON object of the merged config. This means that +you can specify settings in the JSON file that overwrite the settings in +the main config. 
In this way, the configuration that you specify for the +monitor in the main Heartbeat config file acts like a default config that you +can live-reconfigure by specifying additional configurations in the external +JSON file. + +Example configuration: + +[source, yaml] +------------------------------------------------------------------------------- +heartbeat.monitors: +- type: tcp + schedule: '*/5 * * * * * *' + hosts: ["myhost"] + watch.poll_file: + path: {path.config}/monitors/dynamic.json + interval: 5s +------------------------------------------------------------------------------- + +*`path`*:: Specifies the path to the JSON file to check for updates. +*`interval`*:: Specifies how often Heartbeat checks the file for changes. + +To reconfigure the settings specified in the example config, you could define +the following JSON objects in `dynamic.json`: + +[source, json] +------------------------------------------------------------------------------- +{"hosts": ["myhost:1234"], "schedule": "*/15 * * * * * *"} <1> +{"hosts": ["tls://otherhost:479"], "ssl.certificate_authorities": ["path/to/ca/file.pem"]} <2> +------------------------------------------------------------------------------- +<1> Upon detecting the changes, Heartbeat stops the old monitor and then +restarts it with a schedule of 15 seconds between checks. +<2> Heartbeat starts a new monitor that uses a TLS-based connection with a +custom CA certificate. + +[[monitor-icmp-options]] +==== ICMP Options + +These options configure Heartbeat to use ICMP (v4 and v6) Echo Requests to check +the configured hosts. These options are valid when the <> is +`icmp`. + +[[monitor-icmp-hosts]] +===== hosts + +A list of hosts to ping. + +[[monitor-icmp-wait]] +===== wait + +The duration to wait before emitting another ICMP Echo Request. The default is 1 +second (1s). + +[[monitor-tcp-options]] +==== TCP Options + +These options configure Heartbeat to connect via TCP and optionally verify the +endpoint by sending and/or receiving a custom payload. These options are valid when +the <> is `tcp`. + +[[monitor-tcp-hosts]] +===== hosts + +A list of hosts to ping. The entries in the list can be: + +* A plain host name, such as `localhost`, or an IP address. If you specify this +option, you must also specify a value for <>. If the +monitor is configured to use SSL, Heartbeat establishes an SSL/TLS-based +connection. Otherwise, it establishes a plain TCP connection. +* A hostname and port, such as `localhost:12345`. Heartbeat connects +to the port on the specified host. If the monitor is +<>, Heartbeat establishes an +SSL/TLS-based connection. Otherwise, it establishes a TCP connection. +* A full URL using the syntax `scheme://:[port]`, where: +** `scheme` is one of `tcp`, `plain`, `ssl` or `tls`. If `tcp` or `plain` is +specified, Heartbeat establishes a TCP connection even if the monitor is +configured to use SSL. If `tls` or `ssl` is specified, Heartbeat establishes +an SSL connection. However, if the monitor is not configured to use SSL, the +system defaults are used (currently not supported on Windows). +** `host` is the hostname. +** `port` is the port number. If `port` is missing in the URL, the +<> setting is required. + +[[monitor-tcp-ports]] +===== ports + +A list of ports to ping if the host specified in <> +does not contain a port number. 
+ +Example configuration: + +[source,yaml] +------------------------------------------------------------------------------- +- type: tcp + schedule: '@every 5s' + hosts: ["myhost"] + ports: [80, 9200, 5044] +------------------------------------------------------------------------------- + +[[monitor-tcp-check]] +===== check + +An optional payload string to send to the remote host and the expected answer. +If no payload is specified, the endpoint is assumed to be available if the +connection attempt was successful. If `send` is specified without `receive`, +any response is accepted as OK. If `receive` is specified without `send`, no +payload is sent, but the client expects to receive a payload in the form of a +"hello message" or "banner" on connect. + +Example configuration: + +[source,yaml] +------------------------------------------------------------------------------- +- type: tcp + schedule: '@every 5s' + hosts: ["myhost"] + ports: [7] + check.send: 'Hello World' + check.receive: 'Hello World' +------------------------------------------------------------------------------- + + +[[monitor-tcp-proxy-url]] +===== proxy_url + +The URL of the SOCKS5 proxy to use when connecting to the server. The value +must be a URL with a scheme of socks5://. + +If the SOCKS5 proxy server requires client authentication, then a username and +password can be embedded in the URL as shown in the example. + +[source,yaml] +------------------------------------------------------------------------------- + proxy_url: socks5://user:password@socks5-proxy:2233 +------------------------------------------------------------------------------- + +When using a proxy, hostnames are resolved on the proxy server instead of on +the client. You can change this behavior by setting the +`proxy_use_local_resolver` option. + +[[monitor-tcp-proxy-use-local-resolver]] +===== proxy_use_local_resolver + +A Boolean value that determines whether hostnames are resolved locally instead +of being resolved on the proxy server. The default value is false, which means +that name resolution occurs on the proxy server. + +[[monitor-tcp-tls-ssl]] +===== ssl + +The TLS/SSL connection settings. If the monitor is +<>, it will attempt an SSL +handshake. If `check` is not configured, the monitor will only check to see if +it can establish an SSL/TLS connection. This check can fail either at TCP level +or during certificate validation. + +Example configuration: + +[source,yaml] +------------------------------------------------------------------------------- +- type: tcp + schedule: '@every 5s' + hosts: ["myhost"] + ports: [80, 9200, 5044] + ssl: + certificate_authorities: ['/etc/ca.crt'] + supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"] +------------------------------------------------------------------------------- + + +[[monitor-http-options]] +==== HTTP Options + +These options configure Heartbeat to connect via HTTP and optionally verify that +the host returns the expected response. These options are valid when the +<> is `http`. + +[[monitor-http-urls]] +===== urls + +A list of URLs to ping. + +Example configuration: + +[source,yaml] +------------------------------------------------------------------------------- +- type: http + schedule: '@every 5s' + urls: ["http://myhost:80"] +------------------------------------------------------------------------------- + + +[[monitor-http-proxy-url]] +===== proxy_url + +The HTTP proxy URL. This setting is optional. If not set, the HTTP_PROXY +environment variable is used. 
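
For example, an `http` monitor routed through a proxy might be configured as in the following sketch; the proxy address shown is a placeholder:

[source,yaml]
----------------------------------------------------------------------
- type: http
  schedule: '@every 5s'
  urls: ["http://myhost:80"]
  proxy_url: 'http://proxy.example.com:3128'  # requests are sent through this HTTP proxy
----------------------------------------------------------------------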
+ +[[monitor-http-username]] +===== username + +The username for authenticating with the server. The credentials are passed +with the request. This setting is optional. + +You need to specify credentials when your `check.response` settings require it. +For example, you can check for a 403 response (`check.response.status: 403`) +without setting credentials. + +[[monitor-http-password]] +===== password + +The password for authenticating with the server. This setting is optional. + +[[monitor-http-tls-ssl]] +===== ssl + +The TLS/SSL connection settings for use with the HTTPS endpoint. If you don't +specify settings, the system defaults are used. + + +Example configuration: + +[source,yaml] +------------------------------------------------------------------------------- +- type: http + schedule: '@every 5s' + urls: ["https://myhost:80"] + ssl: + certificate_authorities: ['/etc/ca.crt'] + supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"] +------------------------------------------------------------------------------- + + +[[monitor-http-check]] +===== check + +An optional `request` to send to the remote host and the expected `response`. + +Example configuration: + +[source,yaml] +------------------------------------------------------------------------------- +- type: http + schedule: '@every 5s' + urls: ["http://myhost:80"] + check.request.method: HEAD + check.response.status: 200 +------------------------------------------------------------------------------- + + +Under `check.request`, specify these options: + +*`method`*:: The HTTP method to use. Valid values are `"HEAD"`, `"GET"` and +`"POST"`. +*`headers`*:: A dictionary of additional HTTP headers to send. +*`body`*:: Optional request body content. + +Under `check.response`, specify these options: + +*`status`*:: The expected status code. If this setting is not configured or +it's set to 0, any status code other than 404 is accepted. +*`headers`*:: The required response headers. +*`body`*:: The required response body content. + +The following configuration shows how to check the response when the body +contains JSON: + +[source,yaml] +------------------------------------------------------------------------------- +- type: http + schedule: '@every 5s' + urls: ["https://myhost:80"] +check.request: + method: GET + headers: + 'X-API-Key': '12345-mykey-67890' +check.response: + status: 200 + body: '{"status": "ok"}' +------------------------------------------------------------------------------- + + +[[monitors-scheduler]] +==== Scheduler Options + +You specify options under `scheduler` to control the behavior of the task +scheduler. + +Example configuration: + +[source,yaml] +------------------------------------------------------------------------------- +heartbeat.scheduler: + limit: 10 + location: 'UTC-08:00' +------------------------------------------------------------------------------- + +In the example, setting `limit` to 10 guarantees that only 10 concurrent +I/O tasks will be active. An I/O task can be the actual check or resolving an +address via DNS. + +[[heartbeat-scheduler-limit]] +===== limit + +The number of concurrent I/O tasks that Heartbeat is allowed to execute. If set +to 0, there is no limit. The default is 0. + +Most operating systems set a file descriptor limit of 1024. For Heartbeat to +operate correctly and not accidentally block libbeat output, the value that you +specify for `limit` should be below the configured ulimit. + + +[[heartbeat-scheduler-location]] +===== location + +The timezone for the scheduler. 
By default the scheduler uses localtime. + +include::../../../../libbeat/docs/generalconfig.asciidoc[] + +include::../../../../libbeat/docs/outputconfig.asciidoc[] + +include::../../../../libbeat/docs/shared-path-config.asciidoc[] + +include::../../../../libbeat/docs/loggingconfig.asciidoc[] + +include::../../../../libbeat/docs/processors-config.asciidoc[] + diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/securing-heartbeat.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/securing-heartbeat.asciidoc new file mode 100644 index 00000000..fe777ebe --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/docs/securing-heartbeat.asciidoc @@ -0,0 +1,27 @@ +[[securing-heartbeat]] += Securing Heartbeat + +[partintro] + +-- +The following topics describe how to secure communication between Heartbeat +and other products in the Elastic stack: + +* <> +* <> + +//sets block macro for https.asciidoc included in next section + +-- + +[[securing-communication-elasticsearch]] +== Securing Communication With Elasticsearch + +include::../../libbeat/docs/https.asciidoc[] + +//sets block macro for shared-ssl-logstash-config.asciidoc included in next section + +[[configuring-ssl-logstash]] +== Securing Communication With Logstash by Using SSL + +include::../../libbeat/docs/shared-ssl-logstash-config.asciidoc[] \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/troubleshooting.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/troubleshooting.asciidoc new file mode 100644 index 00000000..2eba1699 --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/docs/troubleshooting.asciidoc @@ -0,0 +1,30 @@ +[[troubleshooting]] += Troubleshooting + +[partintro] +-- +If you have issues installing or running Heartbeat, read the +following tips: + +* <> +* <> +* <> + +//sets block macro for getting-help.asciidoc included in next section + +-- + +[[getting-help]] +== Getting Help + +include::../../libbeat/docs/getting-help.asciidoc[] + +//sets block macro for debugging.asciidoc included in next section + +[[enable-heartbeat-debugging]] +== Debugging + +include::../../libbeat/docs/debugging.asciidoc[] + + + diff --git a/vendor/github.com/elastic/beats/heartbeat/heartbeat.full.yml b/vendor/github.com/elastic/beats/heartbeat/heartbeat.full.yml index 8a348aff..72576b16 100644 --- a/vendor/github.com/elastic/beats/heartbeat/heartbeat.full.yml +++ b/vendor/github.com/elastic/beats/heartbeat/heartbeat.full.yml @@ -1,5 +1,12 @@ ################### Heartbeat Configuration Example ######################### +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains +# only some common options, please see heartbeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/heartbeat/index.html + ############################# Heartbeat ###################################### # Configure monitors @@ -67,7 +74,7 @@ heartbeat.monitors: # Using `tls`/`ssl`, an SSL connection is established. If no ssl is configured, # system defaults will be used (not supported on windows). # If `port` is missing in url, the ports setting is required. - hosts: ["localhost:12345"] + hosts: ["localhost:9200"] # Configure IP protocol types to ping on if hostnames are configured. # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. @@ -115,7 +122,7 @@ heartbeat.monitors: - type: http # monitor type `http`. 
Connect via HTTP an optionally verify response # Monitor name used for job name and document type - #name: tcp + #name: http # Enable/Disable monitor #enabled: true @@ -124,7 +131,7 @@ heartbeat.monitors: schedule: '@every 5s' # every 5 seconds from start of beat # Configure URLs to ping - urls: ["http://localhost:80"] + urls: ["http://localhost:9200"] # Configure IP protocol types to ping on if hostnames are configured. # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. diff --git a/vendor/github.com/elastic/beats/heartbeat/heartbeat.template-es2x.json b/vendor/github.com/elastic/beats/heartbeat/heartbeat.template-es2x.json index 730f3823..8766920d 100644 --- a/vendor/github.com/elastic/beats/heartbeat/heartbeat.template-es2x.json +++ b/vendor/github.com/elastic/beats/heartbeat/heartbeat.template-es2x.json @@ -7,7 +7,7 @@ } }, "_meta": { - "version": "5.1.2" + "version": "5.2.2" }, "dynamic_templates": [ { diff --git a/vendor/github.com/elastic/beats/heartbeat/heartbeat.template.json b/vendor/github.com/elastic/beats/heartbeat/heartbeat.template.json index 2a2660ab..0d2bea86 100644 --- a/vendor/github.com/elastic/beats/heartbeat/heartbeat.template.json +++ b/vendor/github.com/elastic/beats/heartbeat/heartbeat.template.json @@ -5,7 +5,7 @@ "norms": false }, "_meta": { - "version": "5.1.2" + "version": "5.2.2" }, "dynamic_templates": [ { @@ -184,6 +184,7 @@ }, "order": 0, "settings": { + "index.mapping.total_fields.limit": 10000, "index.refresh_interval": "5s" }, "template": "heartbeat-*" diff --git a/vendor/github.com/elastic/beats/heartbeat/heartbeat.yml b/vendor/github.com/elastic/beats/heartbeat/heartbeat.yml index 303ab670..d1236efc 100644 --- a/vendor/github.com/elastic/beats/heartbeat/heartbeat.yml +++ b/vendor/github.com/elastic/beats/heartbeat/heartbeat.yml @@ -1,195 +1,27 @@ ################### Heartbeat Configuration Example ######################### +# This file is an example configuration file highlighting only some common options. +# The heartbeat.full.yml file in the same directory contains all the supported options +# with detailed comments. You can use it for reference. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/heartbeat/index.html + ############################# Heartbeat ###################################### # Configure monitors heartbeat.monitors: -- type: icmp # monitor type `icmp` (requires root) uses ICMP Echo Request to ping - # configured hosts +- type: http - # Monitor name used for job name and document type. - #name: icmp - - # Enable/Disable monitor - #enabled: true - - # Configure task schedule using cron-like syntax - schedule: '*/5 * * * * * *' # exactly every 5 seconds like 10:00:00, 10:00:05, ... - - # List of hosts to ping - hosts: ["localhost"] - - # Configure IP protocol types to ping on if hostnames are configured. - # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. - ipv4: true - ipv6: true - mode: any - - # Configure file json file to be watched for changes to the monitor: - #watch.poll_file: - # Path to check for updates. - #path: - - # Interval between file file changed checks. - #interval: 5s - - # Total running time per ping test. - timeout: 16s - - # Waiting duration until another ICMP Echo Request is emitted. - wait: 1s - -- type: tcp # monitor type `tcp`. 
Connect via TCP and optionally verify endpoint - # by sending/receiving a custom payload - - # Monitor name used for job name and document type - #name: tcp - - # Enable/Disable monitor - #enabled: true + # List or urls to query + urls: ["http://localhost:9200"] # Configure task schedule - schedule: '@every 5s' # every 5 seconds from start of beat - - # configure hosts to ping. - # Entries can be: - # - plain host name or IP like `localhost`: - # Requires ports configs to be checked. If ssl is configured, - # a SSL/TLS based connection will be established. Otherwise plain tcp connection - # will be established - # - hostname + port like `localhost:12345`: - # Connect to port on given host. If ssl is configured, - # a SSL/TLS based connection will be established. Otherwise plain tcp connection - # will be established - # - full url syntax. `scheme://:[port]`. The `` can be one of - # `tcp`, `plain`, `ssl` and `tls`. If `tcp`, `plain` is configured, a plain - # tcp connection will be established, even if ssl is configured. - # Using `tls`/`ssl`, an SSL connection is established. If no ssl is configured, - # system defaults will be used (not supported on windows). - # If `port` is missing in url, the ports setting is required. - hosts: ["localhost:12345"] - - # Configure IP protocol types to ping on if hostnames are configured. - # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. - ipv4: true - ipv6: true - mode: any - - # Configure file json file to be watched for changes to the monitor: - #watch.poll_file: - # Path to check for updates. - #path: - - # Interval between file file changed checks. - #interval: 5s - - # List of ports to ping if host does not contain a port number - # ports: [80, 9200, 5044] + schedule: '@every 10s' # Total test connection and data exchange timeout #timeout: 16s - # Optional payload string to send to remote and expected answer. If none is - # configured, the endpoint is expected to be up if connection attempt was - # successful. If only `send_string` is configured, any response will be - # accepted as ok. If only `receive_string` is configured, no payload will be - # send, but client expects to receive expected payload on connect. - #check: - #send: '' - #receive: '' - - # SOCKS5 proxy url - # proxy_url: '' - - # Resolve hostnames locally instead on SOCKS5 server: - #proxy_use_local_resolver: false - - # TLS/SSL connection settings: - #ssl: - # Certificate Authorities - #certificate_authorities: [''] - - # Required TLS protocols - #supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"] - -- type: http # monitor type `http`. Connect via HTTP an optionally verify response - - # Monitor name used for job name and document type - #name: tcp - - # Enable/Disable monitor - #enabled: true - - # Configure task schedule - schedule: '@every 5s' # every 5 seconds from start of beat - - # Configure URLs to ping - urls: ["http://localhost:80"] - - # Configure IP protocol types to ping on if hostnames are configured. - # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. - ipv4: true - ipv6: true - mode: any - - # Configure file json file to be watched for changes to the monitor: - #watch.poll_file: - # Path to check for updates. - #path: - - # Interval between file file changed checks. - #interval: 5s - - # Optional HTTP proxy url. If not set HTTP_PROXY environment variable will be used. 
- #proxy_url: '' - - # Total test connection and data exchange timeout - #timeout: 16s - - # Optional Authentication Credentials - #username: '' - #password: '' - - # TLS/SSL connection settings for use with HTTPS endpoint. If not configured - # system defaults will be used. - #ssl: - # Certificate Authorities - #certificate_authorities: [''] - - # Required TLS protocols - #supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"] - - # Request settings: - #check.request: - # Configure HTTP method to use. Only 'HEAD', 'GET' and 'POST' methods are allowed. - #method: "GET" - - # Dictionary of additional HTTP headers to send: - #headers: - - # Optional request body content - #body: - - # Expected response settings - #check.response: - # Expected status code. If not configured or set to 0 any status code not - # being 404 is accepted. - #status: 0 - - # Required response headers. - #headers: - - # Required response contents. - #body: - -heartbeat.scheduler: - # Limit number of concurrent tasks executed by heartbeat. The task limit if - # disabled if set to 0. The default is 0. - #limit: 0 - - # Set the scheduler it's timezone - #location: '' - #================================ General ===================================== # The name of the shipper that publishes the network data. It can be used to group diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check.go index dd80556a..d0fcaa38 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check.go @@ -13,16 +13,15 @@ type RespCheck func(*http.Response) error var ( errBodyMismatch = errors.New("body mismatch") - errStatus404 = errors.New("file not found") ) -func makeValidateResponse(config *checkConfig) RespCheck { +func makeValidateResponse(config *responseParameters) RespCheck { var checks []RespCheck if config.Status > 0 { checks = append(checks, checkStatus(config.Status)) } else { - checks = append(checks, checkStatusNot404) + checks = append(checks, checkStatusOK) } if len(config.RecvHeaders) > 0 { @@ -33,20 +32,20 @@ func makeValidateResponse(config *checkConfig) RespCheck { checks = append(checks, checkBody([]byte(config.RecvBody))) } - switch len(checks) { - case 0: - return checkOK - case 1: - return checks[0] - default: - return checkAll(checks...) - } + return checkAll(checks...) } func checkOK(_ *http.Response) error { return nil } // TODO: collect all errors into on error message. 
func checkAll(checks ...RespCheck) RespCheck { + switch len(checks) { + case 0: + return checkOK + case 1: + return checks[0] + } + return func(r *http.Response) error { for _, check := range checks { if err := check(r); err != nil { @@ -66,9 +65,9 @@ func checkStatus(status uint16) RespCheck { } } -func checkStatusNot404(r *http.Response) error { - if r.StatusCode == 404 { - return errStatus404 +func checkStatusOK(r *http.Response) error { + if r.StatusCode >= 400 { + return errors.New(r.Status) } return nil } diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/config.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/config.go index a9d88d8c..fb525392 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/config.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/config.go @@ -32,22 +32,29 @@ type Config struct { } type checkConfig struct { - // HTTP request configuration - Method string `config:"request.method"` // http request method - SendHeaders map[string]string `config:"request.headers"` // http request headers - SendBody string `config:"request.body"` // send body payload - Compression compressionConfig `config:"request.compression"` // optionally compress payload + Request requestParameters `config:"request"` + Response responseParameters `config:"response"` +} - // expected HTTP response configuration - Status uint16 `config:"response.status" verify:"min=0, max=699"` - RecvHeaders map[string]string `config:"response.headers"` - RecvBody string `config:"response.body"` +type requestParameters struct { + // HTTP request configuration + Method string `config:"method"` // http request method + SendHeaders map[string]string `config:"headers"` // http request headers + SendBody string `config:"body"` // send body payload + Compression compressionConfig `config:"compression"` // optionally compress payload // TODO: // - add support for cookies // - select HTTP version. golang lib will either use 1.1 or 2.0 if HTTPS is used, otherwise HTTP 1.1 . 
=> implement/use specific http.RoundTripper implementation to change wire protocol/version being used } +type responseParameters struct { + // expected HTTP response configuration + Status uint16 `config:"status" verify:"min=0, max=699"` + RecvHeaders map[string]string `config:"headers"` + RecvBody string `config:"body"` +} + type compressionConfig struct { Type string `config:"type"` Level int `config:"level"` @@ -58,13 +65,25 @@ var defaultConfig = Config{ Timeout: 16 * time.Second, MaxRedirects: 10, Mode: monitors.DefaultIPSettings, + Check: checkConfig{ + Request: requestParameters{ + Method: "GET", + SendHeaders: nil, + SendBody: "", + }, + Response: responseParameters{ + Status: 0, + RecvHeaders: nil, + RecvBody: "", + }, + }, } -func (c *checkConfig) Validate() error { - switch strings.ToUpper(c.Method) { +func (r *requestParameters) Validate() error { + switch strings.ToUpper(r.Method) { case "HEAD", "GET", "POST": default: - return fmt.Errorf("HTTP method '%v' not supported", c.Method) + return fmt.Errorf("HTTP method '%v' not supported", r.Method) } return nil diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/http.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/http.go index 880ae455..986f1b0f 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/http.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/http.go @@ -36,16 +36,16 @@ func create( var body []byte var enc contentEncoder - if config.Check.SendBody != "" { + if config.Check.Request.SendBody != "" { var err error - compression := config.Check.Compression + compression := config.Check.Request.Compression enc, err = getContentEncoder(compression.Type, compression.Level) if err != nil { return nil, err } buf := bytes.NewBuffer(nil) - err = enc.Encode(buf, bytes.NewBufferString(config.Check.SendBody)) + err = enc.Encode(buf, bytes.NewBufferString(config.Check.Request.SendBody)) if err != nil { return nil, err } @@ -53,7 +53,7 @@ func create( body = buf.Bytes() } - validator := makeValidateResponse(&config.Check) + validator := makeValidateResponse(&config.Check.Response) jobs := make([]monitors.Job, len(config.URLs)) diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/task.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/task.go index c9dcdf1c..b3dcd604 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/task.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/task.go @@ -153,7 +153,7 @@ func createPingFactory( } func buildRequest(addr string, config *Config, enc contentEncoder) (*http.Request, error) { - method := strings.ToUpper(config.Check.Method) + method := strings.ToUpper(config.Check.Request.Method) request, err := http.NewRequest(method, addr, nil) if err != nil { return nil, err @@ -163,7 +163,7 @@ func buildRequest(addr string, config *Config, enc contentEncoder) (*http.Reques if config.Username != "" { request.SetBasicAuth(config.Username, config.Password) } - for k, v := range config.Check.SendHeaders { + for k, v := range config.Check.Request.SendHeaders { request.Header.Add(k, v) } diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/icmp/icmp.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/icmp/icmp.go index e3c7b5e2..b06d3c2b 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/icmp/icmp.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/icmp/icmp.go @@ -57,6 +57,10 @@ 
func create( return nil, loopErr } + if err := loop.checkNetworkMode(ipVersion); err != nil { + return nil, err + } + typ := config.Name network := config.Mode.Network() pingFactory := monitors.MakePingIPFactory(nil, createPingIPFactory(&config)) diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/icmp/loop.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/icmp/loop.go index 453bc856..4319fe21 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/icmp/loop.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/icmp/loop.go @@ -3,6 +3,7 @@ package icmp import ( "bytes" "encoding/binary" + "errors" "fmt" "math/rand" "net" @@ -12,6 +13,8 @@ import ( "golang.org/x/net/icmp" "golang.org/x/net/ipv4" "golang.org/x/net/ipv6" + + "github.com/elastic/beats/libbeat/logp" ) type icmpLoop struct { @@ -66,16 +69,12 @@ var ( ) func newICMPLoop() (*icmpLoop, error) { - conn4, err := icmp.ListenPacket("ip4:icmp", "") - if err != nil { - return nil, err - } - - conn6, err := icmp.ListenPacket("ip6:ipv6-icmp", "") - if err != nil { - conn4.Close() - return nil, err - } + // Log errors at info level, as the loop is set up globally when ICMP module is loaded + // first (not yet configured). + // With multiple configurations using the icmp loop, we have to postpone + // IPv4/IPv6 checking + conn4 := createListener("IPv4", "ip4:icmp") + conn6 := createListener("IPv6", "ip6:ipv6-icmp") l := &icmpLoop{ conn4: conn4, @@ -83,12 +82,40 @@ func newICMPLoop() (*icmpLoop, error) { recv: make(chan packet, 16), requests: map[requestID]*requestContext{}, } - go l.runICMPRecv(conn4, protocolICMP) - go l.runICMPRecv(conn6, protocolIPv6ICMP) + + if conn4 != nil { + go l.runICMPRecv(conn4, protocolICMP) + } + if conn6 != nil { + go l.runICMPRecv(conn6, protocolIPv6ICMP) + } return l, nil } +func (l *icmpLoop) checkNetworkMode(mode string) error { + ip4, ip6 := false, false + switch mode { + case "ip4": + ip4 = true + case "ip6": + ip6 = true + case "ip": + ip4, ip6 = true, true + default: + return fmt.Errorf("'%v' is not supported", mode) + } + + if ip4 && l.conn4 == nil { + return errors.New("failed to initiate IPv4 support") + } + if ip6 && l.conn6 == nil { + return errors.New("failed to initiate IPv6 support") + } + + return nil +} + func (l *icmpLoop) runICMPRecv(conn *icmp.PacketConn, proto int) { for { bytes := make([]byte, 512) @@ -99,7 +126,7 @@ func (l *icmpLoop) runICMPRecv(conn *icmp.PacketConn, proto int) { if neterr.Timeout() { continue } else { - // TODO: report error and quit loop + // TODO: report error and quit loop? return } } @@ -293,6 +320,19 @@ func (l *icmpLoop) sendEchoRequest(addr *net.IPAddr) (*requestContext, error) { return ctx, nil } +func createListener(name, network string) *icmp.PacketConn { + conn, err := icmp.ListenPacket(network, "") + + // XXX: need to check for conn == nil, as 'err != nil' seems always to be + // true, even if error value itself is `nil`. Checking for conn suppresses + // misleading log message. 
+ if conn == nil && err != nil { + logp.Info("%v ICMP not supported: %v", name, err) + return nil + } + return conn +} + // timeoutError implements net.Error interface func (timeoutError) Error() string { return "ping timeout" } func (timeoutError) Timeout() bool { return true } diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/util.go b/vendor/github.com/elastic/beats/heartbeat/monitors/util.go index fe81ab70..9045d6a6 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/util.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/util.go @@ -281,20 +281,14 @@ func (f funcTask) annotated(start time.Time, typ string) TaskRunner { return annotated(start, typ, f.run) } -func (p *PingMode) Unpack(v interface{}) error { - var fail = errors.New("expecting 'any' or 'all'") - s, ok := v.(string) - if !ok { - return fail - } - +func (p *PingMode) Unpack(s string) error { switch s { case "all": *p = PingAll case "any": *p = PingAny default: - return fail + return fmt.Errorf("expecting 'any' or 'all', not '%v'", s) } return nil } diff --git a/vendor/github.com/elastic/beats/heartbeat/scheduler/schedule/cron/cron.go b/vendor/github.com/elastic/beats/heartbeat/scheduler/schedule/cron/cron.go index 1bef22e2..e7ade343 100644 --- a/vendor/github.com/elastic/beats/heartbeat/scheduler/schedule/cron/cron.go +++ b/vendor/github.com/elastic/beats/heartbeat/scheduler/schedule/cron/cron.go @@ -1,7 +1,6 @@ package cron import ( - "errors" "time" "github.com/gorhill/cronexpr" @@ -27,17 +26,10 @@ func (s *Schedule) Next(t time.Time) time.Time { return expr.Next(t) } -func (s *Schedule) Unpack(in interface{}) error { - str, ok := in.(string) - if !ok { - return errors.New("scheduler string required") - } - +func (s *Schedule) Unpack(str string) error { tmp, err := Parse(str) - if err != nil { - return err + if err == nil { + *s = *tmp } - - *s = *tmp - return nil + return err } diff --git a/vendor/github.com/elastic/beats/heartbeat/scheduler/schedule/schedule.go b/vendor/github.com/elastic/beats/heartbeat/scheduler/schedule/schedule.go index 7b77ee92..1d8a47d4 100644 --- a/vendor/github.com/elastic/beats/heartbeat/scheduler/schedule/schedule.go +++ b/vendor/github.com/elastic/beats/heartbeat/scheduler/schedule/schedule.go @@ -1,7 +1,6 @@ package schedule import ( - "errors" "strings" "time" @@ -43,17 +42,10 @@ func (s intervalScheduler) Next(t time.Time) time.Time { return t.Add(s.interval) } -func (s *Schedule) Unpack(in interface{}) error { - str, ok := in.(string) - if !ok { - return errors.New("scheduler string required") - } - +func (s *Schedule) Unpack(str string) error { tmp, err := Parse(str) - if err != nil { - return err + if err == nil { + *s = *tmp } - - *s = *tmp - return nil + return err } diff --git a/vendor/github.com/elastic/beats/libbeat/Dockerfile b/vendor/github.com/elastic/beats/libbeat/Dockerfile index a09e6944..00f00464 100644 --- a/vendor/github.com/elastic/beats/libbeat/Dockerfile +++ b/vendor/github.com/elastic/beats/libbeat/Dockerfile @@ -1,10 +1,11 @@ # Beats dockerfile used for testing -FROM golang:1.7.1 +FROM golang:1.7.4 MAINTAINER Nicolas Ruflin RUN set -x && \ apt-get update && \ - apt-get install -y netcat python-virtualenv python-pip && \ + apt-get install -y --no-install-recommends \ + netcat python-pip virtualenv && \ apt-get clean ENV PYTHON_ENV=/tmp/python-env diff --git a/vendor/github.com/elastic/beats/libbeat/beat/version.go b/vendor/github.com/elastic/beats/libbeat/beat/version.go index 8cdf2259..abedda5e 100644 --- 
a/vendor/github.com/elastic/beats/libbeat/beat/version.go +++ b/vendor/github.com/elastic/beats/libbeat/beat/version.go @@ -1,3 +1,3 @@ package beat -const defaultBeatVersion = "5.1.2" +const defaultBeatVersion = "5.2.2" diff --git a/vendor/github.com/elastic/beats/libbeat/common/jsontransform/jsonhelper.go b/vendor/github.com/elastic/beats/libbeat/common/jsontransform/jsonhelper.go new file mode 100644 index 00000000..37bdb7e9 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/common/jsontransform/jsonhelper.go @@ -0,0 +1,51 @@ +package jsontransform + +import ( + "fmt" + "time" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" +) + +func WriteJSONKeys(event common.MapStr, keys map[string]interface{}, overwriteKeys bool, errorKey string) { + for k, v := range keys { + if overwriteKeys { + if k == "@timestamp" { + vstr, ok := v.(string) + if !ok { + logp.Err("JSON: Won't overwrite @timestamp because value is not string") + event[errorKey] = "@timestamp not overwritten (not string)" + continue + } + + // @timestamp must be of format RFC3339 + ts, err := time.Parse(time.RFC3339, vstr) + if err != nil { + logp.Err("JSON: Won't overwrite @timestamp because of parsing error: %v", err) + event[errorKey] = fmt.Sprintf("@timestamp not overwritten (parse error on %s)", vstr) + continue + } + event[k] = common.Time(ts) + } else if k == "type" { + vstr, ok := v.(string) + if !ok { + logp.Err("JSON: Won't overwrite type because value is not string") + event[errorKey] = "type not overwritten (not string)" + continue + } + if len(vstr) == 0 || vstr[0] == '_' { + logp.Err("JSON: Won't overwrite type because value is empty or starts with an underscore") + event[errorKey] = fmt.Sprintf("type not overwritten (invalid value [%s])", vstr) + continue + } + event[k] = vstr + } else { + event[k] = v + } + } else if _, exists := event[k]; !exists { + event[k] = v + } + + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/net.go b/vendor/github.com/elastic/beats/libbeat/common/net.go index 9c9645cc..0d258046 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/net.go +++ b/vendor/github.com/elastic/beats/libbeat/common/net.go @@ -8,15 +8,29 @@ import ( // LocalIPAddrs finds the IP addresses of the hosts on which // the shipper currently runs on. 
func LocalIPAddrs() ([]net.IP, error) { - var localIPAddrs = []net.IP{} + var localIPAddrs []net.IP ipaddrs, err := net.InterfaceAddrs() if err != nil { - return []net.IP{}, err + return nil, err } - for _, ipaddr := range ipaddrs { - if ipnet, ok := ipaddr.(*net.IPNet); ok { - localIPAddrs = append(localIPAddrs, ipnet.IP) + for _, addr := range ipaddrs { + var ip net.IP + ok := true + + switch v := addr.(type) { + case *net.IPNet: + ip = v.IP + case *net.IPAddr: + ip = v.IP + default: + ok = false } + + if !ok { + continue + } + + localIPAddrs = append(localIPAddrs, ip) } return localIPAddrs, nil } diff --git a/vendor/github.com/elastic/beats/libbeat/dashboards/Makefile b/vendor/github.com/elastic/beats/libbeat/dashboards/Makefile index cf13bfbc..9fc807cf 100644 --- a/vendor/github.com/elastic/beats/libbeat/dashboards/Makefile +++ b/vendor/github.com/elastic/beats/libbeat/dashboards/Makefile @@ -1,5 +1,5 @@ BEAT?= -import_dasboards: import_dashboards.go +import_dashboards: go build -ldflags "-X main.beat=${BEAT}" -o import_dashboards diff --git a/vendor/github.com/elastic/beats/libbeat/dashboards/import_dashboards.go b/vendor/github.com/elastic/beats/libbeat/dashboards/import_dashboards.go index 6afa4c31..658ee890 100644 --- a/vendor/github.com/elastic/beats/libbeat/dashboards/import_dashboards.go +++ b/vendor/github.com/elastic/beats/libbeat/dashboards/import_dashboards.go @@ -18,8 +18,10 @@ import ( lbeat "github.com/elastic/beats/libbeat/beat" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/fmtstr" + "github.com/elastic/beats/libbeat/outputs" "github.com/elastic/beats/libbeat/outputs/elasticsearch" "github.com/elastic/beats/libbeat/outputs/outil" + "github.com/elastic/beats/libbeat/outputs/transport" ) var usage = fmt.Sprintf(` @@ -42,18 +44,22 @@ For more details, check https://www.elastic.co/guide/en/beats/libbeat/5.0/import var beat string type Options struct { - KibanaIndex string - ES string - Index string - Dir string - File string - Beat string - URL string - User string - Pass string - OnlyDashboards bool - OnlyIndex bool - Snapshot bool + KibanaIndex string + ES string + Index string + Dir string + File string + Beat string + URL string + User string + Pass string + Certificate string + CertificateKey string + CertificateAuthority string + Insecure bool // Allow insecure SSL connections. + OnlyDashboards bool + OnlyIndex bool + Snapshot bool } type CommandLine struct { @@ -91,6 +97,10 @@ func DefineCommandLine() (*CommandLine, error) { cl.flagSet.BoolVar(&cl.opt.OnlyDashboards, "only-dashboards", false, "Import only dashboards together with visualizations and searches. By default import both, dashboards and the index-pattern.") cl.flagSet.BoolVar(&cl.opt.OnlyIndex, "only-index", false, "Import only the index-pattern. By default imports both, dashboards and the index pattern.") cl.flagSet.BoolVar(&cl.opt.Snapshot, "snapshot", false, "Import dashboards from snapshot builds.") + cl.flagSet.StringVar(&cl.opt.CertificateAuthority, "cacert", "", "Certificate Authority for server verification") + cl.flagSet.StringVar(&cl.opt.Certificate, "cert", "", "Certificate for SSL client authentication in PEM format.") + cl.flagSet.StringVar(&cl.opt.CertificateKey, "key", "", "Client Certificate Key in PEM format.") + cl.flagSet.BoolVar(&cl.opt.Insecure, "insecure", false, `Allows "insecure" SSL connections`) return &cl, nil } @@ -107,6 +117,14 @@ func (cl *CommandLine) ParseCommandLine() error { return errors.New("ERROR: Missing input. 
Please specify one of the options -file, -url or -dir") } + if cl.opt.Certificate != "" && cl.opt.CertificateKey == "" { + return errors.New("ERROR: A certificate key needs to be passed as well by using the -key option.") + } + + if cl.opt.CertificateKey != "" && cl.opt.Certificate == "" { + return errors.New("ERROR: A certificate needs to be passed as well by using the -cert option.") + } + return nil } @@ -133,11 +151,35 @@ func New() (*Importer, error) { } indexSel := outil.MakeSelector(outil.FmtSelectorExpr(fmtstr, "")) + var tlsConfig outputs.TLSConfig + var tls *transport.TLSConfig + + if cl.opt.Insecure { + tlsConfig.VerificationMode = transport.VerifyNone + } + + if len(cl.opt.Certificate) > 0 && len(cl.opt.CertificateKey) > 0 { + tlsConfig.Certificate = outputs.CertificateConfig{ + Certificate: cl.opt.Certificate, + Key: cl.opt.CertificateKey, + } + } + + if len(cl.opt.CertificateAuthority) > 0 { + tlsConfig.CAs = []string{cl.opt.CertificateAuthority} + } + + tls, err = outputs.LoadTLSConfig(&tlsConfig) + if err != nil { + return nil, fmt.Errorf("fail to load the SSL certificate: %s", err) + } + /* connect to Elasticsearch */ client, err := elasticsearch.NewClient( elasticsearch.ClientSettings{ URL: cl.opt.ES, Index: indexSel, + TLS: tls, Username: cl.opt.User, Password: cl.opt.Pass, Timeout: 60 * time.Second, diff --git a/vendor/github.com/elastic/beats/libbeat/docker-compose.yml b/vendor/github.com/elastic/beats/libbeat/docker-compose.yml index b0ec5312..f6b7f683 100644 --- a/vendor/github.com/elastic/beats/libbeat/docker-compose.yml +++ b/vendor/github.com/elastic/beats/libbeat/docker-compose.yml @@ -2,7 +2,9 @@ version: '2' services: beat: build: ${PWD}/. - links: + depends_on: + - logstash + - elasticsearch - redis - sredis - kafka @@ -28,8 +30,16 @@ services: command: make entrypoint: /go/src/github.com/elastic/beats/libbeat/scripts/docker-entrypoint.sh + elasticsearch: + extends: + file: ../testing/environments/${TESTING_ENVIRONMENT}.yml + service: elasticsearch + # This host name is fixed because of the certificate logstash: + extends: + file: ../testing/environments/${TESTING_ENVIRONMENT}.yml + service: logstash env_file: - ${PWD}/build/test.env @@ -56,9 +66,3 @@ services: - 2181 environment: - ADVERTISED_HOST=kafka - - # Overloading kibana with a simple image as it is not needed here - kibana: - build: - context: ${PWD}/../testing/environments/ - dockerfile: Dockerfile diff --git a/vendor/github.com/elastic/beats/libbeat/docs/communitybeats.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/communitybeats.asciidoc index a33191b9..50205883 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/communitybeats.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/communitybeats.asciidoc @@ -9,11 +9,13 @@ NOTE: Elastic provides no warranty or support for community-sourced Beats. [horizontal] https://github.com/awormuth/amazonbeat[amazonbeat]:: Reads data from a specified Amazon product. https://github.com/radoondas/apachebeat[apachebeat]:: Reads status from Apache HTTPD server-status. +https://github.com/verticle-io/apexbeat[apexbeat]:: Extracts configurable contextual data and metrics from Java applications via the http://toolkits.verticle.io[APEX] toolkit. https://github.com/goomzee/burrowbeat[burrowbeat]:: Monitors Kafka consumer lag using Burrow. https://github.com/goomzee/cassandrabeat[cassandrabeat]:: Uses Cassandra's nodetool cfstats utility to monitor Cassandra database nodes and lag. 
https://github.com/hartfordfive/cloudflarebeat[cloudflarebeat]:: Indexes log entries from the Cloudflare Enterprise Log Share API. https://github.com/aidan-/cloudtrailbeat[cloudtrailbeat]:: Reads events from Amazon Web Services' https://aws.amazon.com/cloudtrail/[CloudTrail]. -https://github.com/Pravoru/consulbeat[consulbeat]:: Reads services health checks from consul and pushes them to elastic. +https://github.com/raboof/connbeat[connbeat]:: Exposes metadata about TCP connections. +https://github.com/Pravoru/consulbeat[consulbeat]:: Reads services health checks from consul and pushes them to Elastic. https://github.com/Ingensi/dockbeat[dockbeat]:: Reads Docker container statistics and indexes them in Elasticsearch. https://github.com/radoondas/elasticbeat[elasticbeat]:: Reads status from an Elasticsearch cluster and indexes them in Elasticsearch. @@ -25,10 +27,12 @@ https://github.com/YaSuenag/hsbeat[hsbeat]:: Reads all performance counters in J https://github.com/christiangalsterer/httpbeat[httpbeat]:: Polls multiple HTTP(S) endpoints and sends the data to Logstash or Elasticsearch. Supports all HTTP methods and proxies. https://github.com/jasperla/hwsensorsbeat[hwsensorsbeat]:: Reads sensors information from OpenBSD. +https://github.com/devopsmakers/iobeat[iobeat]:: Reads IO stats from /proc/diskstats on Linux. https://github.com/radoondas/jmxproxybeat[jmxproxybeat]:: Reads Tomcat JMX metrics exposed over 'JMX Proxy Servlet' to HTTP. https://github.com/mheese/journalbeat[journalbeat]:: Used for log shipping from systemd/journald based Linux systems. https://github.com/eskibars/lmsensorsbeat[lmsensorsbeat]:: Collects data from lm-sensors (such as CPU temperatures, fan speeds, and voltages from i2c and smbus). https://github.com/consulthys/logstashbeat[logstashbeat]:: Collects data from Logstash monitoring API (v5 onwards) and indexes them in Elasticsearch. +https://github.com/yedamao/mcqbeat[mcqbeat]:: Reads the status of queues from memcacheq. https://github.com/scottcrespo/mongobeat[mongobeat]:: Monitors MongoDB instances and can be configured to send multiple document types to Elasticsearch. https://github.com/adibendahan/mysqlbeat[mysqlbeat]:: Run any query on MySQL and send results to Elasticsearch. https://github.com/PhaedrusTheGreek/nagioscheckbeat[nagioscheckbeat]:: For Nagios checks and performance data. @@ -40,6 +44,7 @@ managers. https://github.com/kozlice/phpfpmbeat[phpfpmbeat]:: Reads status from PHP-FPM. https://github.com/joshuar/pingbeat[pingbeat]:: Sends ICMP pings to a list of targets and stores the round trip time (RTT) in Elasticsearch. +https://github.com/carlpett/prombeat[prombeat]:: Indexes https://prometheus.io[Prometheus] metrics. https://github.com/voigt/redditbeat[redditbeat]:: Collects new Reddit Submissions of one or multiple Subreddits. https://github.com/chrsblck/redisbeat[redisbeat]:: Used for Redis monitoring. https://github.com/consulthys/retsbeat[retsbeat]:: Collects counts of http://www.reso.org[RETS] resource/class records from https://en.wikipedia.org/wiki/Multiple_listing_service[Multiple Listing Service] (MLS) servers. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/dashboards.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/dashboards.asciidoc index de6e95ae..9e35cbb4 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/dashboards.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/dashboards.asciidoc @@ -19,7 +19,7 @@ visualizations, and searches for {beatname_uc}. 
The script also creates an index +{beatname_lc}-*+, for {beatname_uc}. The steps in this section show how to import {beatname_uc} dashboards. You may want to import dashboards for more than -one Beat or specify import options that aren't described here. See {libbeat}/[Importing Existing Beat Dashboards] +one Beat or specify import options that aren't described here. See {libbeat}/import-dashboards.html[Importing Existing Beat Dashboards] in the _Beats Platform Reference_ for a full list of command-line options. To import the Kibana dashboards for {beatname_uc}: @@ -96,11 +96,10 @@ PS > scripts\import_dashboards.exe -es https://xyz.found.io -user user -pass pas After importing the dashboards, launch the Kibana web interface by pointing your browser to port 5601. For example, http://127.0.0.1:5601[http://127.0.0.1:5601]. -If Kibana shows a `No default index pattern` warning, you must select or create -an index pattern to continue. To resolve the issue, select the -predefined +{beatname_lc}-*+ index pattern and set it as the default. +On the *Discover* page, make sure that the predefined +{beatname_lc}-*+ index +pattern is selected to see {beatname_uc} data. -image:./images/kibana-created-indexes.png[Kibana configured indexes] +image:./images/kibana-created-indexes.png[Discover tab with index selected] To open the loaded dashboards, go to the *Dashboard* page and click *Open*. Select the dashboard that you want to open. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/gettingstarted.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/gettingstarted.asciidoc index 6435529c..cbeb8936 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/gettingstarted.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/gettingstarted.asciidoc @@ -112,14 +112,15 @@ You should see a response similar to this: ["source","sh",subs="attributes"] ---------------------------------------------------------------------- { - "name" : "Angela Del Toro", + "name" : "ofgAtrJ", "cluster_name" : "elasticsearch", + "cluster_uuid" : "3h9xSrVlRJmDHgQ8FLnByA", "version" : { "number" : "{ES-version}", - "build_hash" : "8ff36d139e16f8720f2947ef62c8167a888992fe", - "build_timestamp" : "2016-01-27T13:32:39Z", + "build_hash" : "db0d481", + "build_date" : "2017-02-09T22:05:32.386Z", "build_snapshot" : false, - "lucene_version" : "6.1.0" + "lucene_version" : "6.4.1" }, "tagline" : "You Know, for Search" } diff --git a/vendor/github.com/elastic/beats/libbeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/index.asciidoc index 3978f3e8..eb4b3c82 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/index.asciidoc @@ -7,7 +7,10 @@ include::./version.asciidoc[] :metricbeat: http://www.elastic.co/guide/en/beats/metricbeat/{doc-branch} :filebeat: http://www.elastic.co/guide/en/beats/filebeat/{doc-branch} :winlogbeat: http://www.elastic.co/guide/en/beats/winlogbeat/{doc-branch} -:securitydoc: https://www.elastic.co/guide/en/x-pack/5.0 +:heartbeat: http://www.elastic.co/guide/en/beats/heartbeat/{doc-branch} +:securitydoc: https://www.elastic.co/guide/en/x-pack/5.2 +:beatname_lc: beatname +:beatname_uc: a Beat :security: X-Pack Security :ES-version: {stack-version} :LS-version: {stack-version} diff --git a/vendor/github.com/elastic/beats/libbeat/docs/installing-beats.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/installing-beats.asciidoc index 96b2f574..6d99e75e 100644 --- 
a/vendor/github.com/elastic/beats/libbeat/docs/installing-beats.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/installing-beats.asciidoc @@ -25,5 +25,6 @@ Each Beat is a separately installable product. To get up and running quickly wit * {metricbeat}/metricbeat-getting-started.html[Metricbeat] * {filebeat}/filebeat-getting-started.html[Filebeat] * {winlogbeat}/winlogbeat-getting-started.html[Winlogbeat] +* {heartbeat}/heartbeat-getting-started.html[Heartbeat] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/newbeat.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/newbeat.asciidoc index 9d02abe4..06c49898 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/newbeat.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/newbeat.asciidoc @@ -42,7 +42,7 @@ https://github.com/elastic/beats/blob/master/CONTRIBUTING.md[Contributing Guide] After you have https://golang.org/doc/install[installed Go] and set up the https://golang.org/doc/code.html#GOPATH[GOPATH] environment variable to point to -your preferred workspace location, a simple way of getting the source code for +your preferred workspace location, a simple way of getting the source code for Beats, including libbeat and the Beat generator, is to do: [source,shell] @@ -52,7 +52,14 @@ go get github.com/elastic/beats When you run the command, all source files are downloaded to the `$GOPATH/src/github.com/elastic/beats` path. You can ignore the "no buildable Go source files" message because -you will build the source later. +you will build the source later. By default `go get` fetches the master branch. To build your beat +on a specific version of libbeat, check out the specific branch ({doc-branch} in the example below): + +["source","sh",subs="attributes"] +---- +cd $GOPATH/src/github.com/elastic/beats +git checkout {doc-branch} +---- [[newbeat-overview]] === Overview @@ -119,7 +126,7 @@ To generate your own Beat, you use the Beat generator available in the beats rep downloaded the Beats source code yet, follow the instructions in <>. Before running the Beat generator, you must install https://github.com/audreyr/cookiecutter[cookiecutter], a -command-line utility that creates projects from project templates. Check out the +command-line utility that creates projects from project templates. Check out the http://cookiecutter.readthedocs.io/en/latest/installation.html[installation guide]. After installing cookiecutter, decide on a name for your beat. The name should be one word with the first letter capitalized. In our example, we use `Countbeat`. @@ -151,14 +158,14 @@ beat_path [github.com/{github id}]: full_name [Firstname Lastname]: {Full Name} --------- -The Beat generator creates a directory called `countbeat` inside of your project folder. +The Beat generator creates a directory called `countbeat` inside of your project folder. You now have a raw template of the Beat, but you still need to <>. [[setting-up-beat]] === Fetching Dependencies and Setting up the Beat -First you need to install the following tools: +First you need to install the following tools: * https://www.python.org/downloads/[Python] * https://virtualenv.pypa.io/en/stable/[virtualenv] @@ -181,7 +188,7 @@ To compile the Beat, make sure you are in the Beat directory (`$GOPATH/src/githu [source,shell] --------- -make +make --------- Running this command creates the binary called `countbeat` in `$GOPATH/src/github.com/{user}/countbeat`. 
@@ -190,12 +197,12 @@ Now run the Beat: [source,shell] --------- -./countbeat -e -d "*" +./countbeat -e -d "*" --------- The command automatically loads the default config file, `countbeat.yml`, and sends debug output to the console. -You can stop the Beat by pressing `Ctrl+C`. +You can stop the Beat by pressing `Ctrl+C`. [[beater-interface]] === The Beater Interface @@ -227,7 +234,7 @@ type Beater interface { ---------------------------------------------------------------------- To implement the Beater interface, you need to define a Beat object that -implements two methods: <> and <>. +implements two methods: <> and <>. [source,go] -------------- @@ -312,7 +319,7 @@ type's default value. The recommended way of handling the configuration (as shown in the code example) is to create a `Config` structure with the configuration options and a `DefaultConfig` with -the default configuration options. +the default configuration options. When you use the Beat generator, the Go structures for a basic config are added to `config/config.go`: @@ -333,7 +340,7 @@ var DefaultConfig = Config{ ---------------------------------------------------------------------- -This mirrors the config options that are defined in the config file, `countbeat.yml`. +This mirrors the config options that are defined in the config file, `countbeat.yml`. [source,yaml] ------------ @@ -343,12 +350,12 @@ countbeat: ------------ - `period`: Defines how often to send out events - + The config file is generated when you run `make setup` to <>. The file contains basic configuration information. To add configuration options to your Beat, you need to update the Go structures in `config/config.go` and add the corresponding config options to `etc/beat.yml`. -For example, if you add a config option called `path` to the Go structures: +For example, if you add a config option called `path` to the Go structures: [source,go] ---------------------------------------------------------------------- @@ -364,7 +371,7 @@ var DefaultConfig = Config{ ---------------------------------------------------------------------- -You also need to add `path` to `etc/beat.yml`: +You also need to add `path` to `etc/beat.yml`: [source,yml] ---------------------------------------------------------------------- @@ -383,7 +390,7 @@ make update [[run-method]] ==== Run Method -The `Run` method contains your main application loop. +The `Run` method contains your main application loop. [source,go] ---------------------------------------------------------------------- @@ -419,8 +426,8 @@ Inside the loop, the Beat sleeps for a configurable period of time and then captures the required data and sends it to the publisher. The publisher client is available as part of the Beat object through the `client` variable. -The `event := common.MapStr{}` stores the event in a json format, and `bt.client.PublishEvent(event)` publishes data to Elasticsearch. -In the generated Beat, there are three fields in the event: @timestamp, type, and counter. +The `event := common.MapStr{}` stores the event in a json format, and `bt.client.PublishEvent(event)` publishes data to Elasticsearch. +In the generated Beat, there are three fields in the event: @timestamp, type, and counter. When you add fields to the event object, you also need to add them to the `etc/fields.yml` file: @@ -446,7 +453,7 @@ For more detail about naming the fields in an event, see <>. 
The `Stop` method is called when the Beat is signaled to stop, for example through the SIGTERM signal on Unix systems or the service control -interface on Windows. This method simply closes the channel +interface on Windows. This method simply closes the channel which breaks the main loop. [source,go] @@ -488,7 +495,7 @@ func main() { === Example: Building Lsbeat from Scratch https://github.com/kimjmin/lsbeat[Lsbeat] is similar to the `ls` command-line tool, but instead of printing the files and subdirectories to the screen, -Lsbeat periodically ships them to Elasticsearch for storage. +Lsbeat periodically ships them to Elasticsearch for storage. To help you learn how to build a Beat, we've created this http://elastic.co//blog/build-your-own-beat[blog post] that describes how to build Lsbeat from scratch. You can refer to the Lsbeat implementation as a working example. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/newdashboards.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/newdashboards.asciidoc index b6a5594e..35237669 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/newdashboards.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/newdashboards.asciidoc @@ -151,6 +151,13 @@ The Beat name. The Beat name is required when importing from a zip archive. When the Beat. When running the script from source, the default value is "", so you need to set this option in order to install the index pattern and the dashboards for a single Beat. Otherwise the script imports the index pattern and the dashboards for all Beats. +*`-cacert `*:: +The Certificate Authority to use for server verification. + +*`-cert `*:: +The certificate to use for SSL client authentication. The certificate must be in +PEM format. + *`-dir `*:: Local directory that contains the subdirectories: dashboard, visualization, search, and index-pattern. The default value is the current directory. @@ -164,9 +171,15 @@ Local zip archive with the dashboards. The archive can contain Kibana dashboards You should only use this option if you want to change the index pattern name that's used by default. For example, if the default is `metricbeat-*`, you can change it to `custombeat-*`. +*`-insecure`*:: +If specified, "insecure" SSL connections are allowed. + *`-k `*:: The Elasticsearch index pattern where Kibana saves its configuration. The default value is `.kibana`. +*`-key `*:: +The client certificate key. The key must be in PEM format. + *`-only-dashboards`*:: If specified, then only the dashboards, along with their visualizations and searches, are imported. The index pattern is not imported. By default, this is false. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/outputconfig.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/outputconfig.asciidoc index 0d58a2f1..4dac8941 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/outputconfig.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/outputconfig.asciidoc @@ -363,8 +363,8 @@ Every event sent to Logstash contains additional metadata for indexing and filte In Logstash, you can configure the Elasticsearch output plugin to use the metadata and event type for indexing. -The following *Logstash 1.5* configuration file sets Logstash to use the index and -document type reported by Beats for indexing events into Elasticsearch. +The following Logstash configuration file for the versions 2.x and 5.x sets Logstash to +use the index and document type reported by Beats for indexing events into Elasticsearch. 
The index used will depend on the `@timestamp` field as identified by Logstash. [source,logstash] @@ -376,28 +376,6 @@ input { } } -output { - elasticsearch { - host => "localhost" - port => "9200" - protocol => "http" - index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" - document_type => "%{[@metadata][type]}" - } -} ------------------------------------------------------------------------------- - -Here is the same configuration for *Logstash 2.x* releases: - -[source,logstash] ------------------------------------------------------------------------------- - -input { - beats { - port => 5044 - } -} - output { elasticsearch { hosts => ["http://localhost:9200"] @@ -617,7 +595,8 @@ Valid values are `0.8.2.0`, `0.8.2.1`, `0.8.2.2`, `0.8.2`, `0.8`, `0.9.0.0`, ===== username -The username for connecting to Kafka. If username is configured, the passowrd must be configured as well. Only SASL/PLAIN is supported. +The username for connecting to Kafka. If username is configured, the password +must be configured as well. Only SASL/PLAIN is supported. ===== password @@ -657,7 +636,7 @@ Kafka output broker event partitioning strategy. Must be one of `random`, *`random.group_events`*: Sets the number of events to be published to the same partition, before the partitioner selects a new partition by random. The - default value is 1 meaning after each event a new parition is picked randomly. + default value is 1 meaning after each event a new partition is picked randomly. *`round_robin.group_events`*: Sets the number of events to be published to the same partition, before the partitioner selects the next partition. The default @@ -689,7 +668,7 @@ The number of concurrent load-balanced Kafka output workers. Kafka metadata update settings. The metadata do contain information about brokers, topics, partition, and active leaders to use for publishing. -*`refresh_frequency`*:: Metadata refreash interval. Defaults to 10 minutes. +*`refresh_frequency`*:: Metadata refresh interval. Defaults to 10 minutes. *`retry.max`*:: Total number of metadata update retries when cluster is in middle of leader election. The default is 3. @@ -1104,7 +1083,7 @@ The client certificate key used for client authentication. This option is requir The passphrase used to decrypt an encrypted key stored in the configured `key` file. -===== versions +===== supported_protocols List of allowed SSL/TLS versions. If SSL/TLS server decides for protocol versions not configured, the connection will be dropped during or after the handshake. The diff --git a/vendor/github.com/elastic/beats/libbeat/docs/processors-config.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/processors-config.asciidoc index c1201dca..b7a71bc1 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/processors-config.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/processors-config.asciidoc @@ -114,7 +114,7 @@ For example, the following condition checks if the process name starts with [source,yaml] ----- -reqexp: +regexp: system.process.name: "foo.*" ----- @@ -353,6 +353,8 @@ processors: fields: ["field1", "field2", ...] process_array: false max_depth: 1 + target: + overwrite_keys: false ----------------------------------------------------- The `decode_json_fields` processor has the following configuration settings: @@ -361,6 +363,13 @@ The `decode_json_fields` processor has the following configuration settings: `process_array`:: (Optional) A boolean that specifies whether to process arrays. The default is false. 
`max_depth`:: (Optional) The maximum parsing depth. The default is 1. +`target`:: (Optional) The field under which the decoded JSON will be written. By +default the decoded JSON object replaces the string field from which it was +read. To merge the decoded JSON fields into the root of the event, specify +`target` with an empty value (`target:`). +`overwrite_keys`:: (Optional) A boolean that specifies whether keys that already +exist in the event are overwritten by keys from the decoded JSON object. The +default value is false. [[drop-event]] === drop_event diff --git a/vendor/github.com/elastic/beats/libbeat/docs/processors.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/processors.asciidoc index 923c1e5b..c393f16f 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/processors.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/processors.asciidoc @@ -6,7 +6,7 @@ //// Use the appropriate variables defined in the index.asciidoc file to //// resolve Beat names: beatname_uc and beatname_lc. //// Use the following include to pull this content into a doc file: -//// include::../../libbeat/docs/filtering.asciidoc[] +//// include::../../libbeat/docs/processors.asciidoc[] ////////////////////////////////////////////////////////////////////////// You can define processors in your configuration to process events before they diff --git a/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc index eb2a8444..4ec6392b 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc @@ -6,6 +6,8 @@ -- This section summarizes the changes in each release. +* <> +* <> * <> * <> * <> diff --git a/vendor/github.com/elastic/beats/libbeat/docs/repositories.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/repositories.asciidoc index ea78654a..f723eb6c 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/repositories.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/repositories.asciidoc @@ -1,3 +1,14 @@ +////////////////////////////////////////////////////////////////////////// +//// This content is shared by all Elastic Beats. Make sure you keep the +//// descriptions here generic enough to work for all Beats that include +//// this file. When using cross references, make sure that the cross +//// references resolve correctly for any files that include this one. +//// Use the appropriate variables defined in the index.asciidoc file to +//// resolve Beat names: beatname_uc and beatname_lc. +//// Use the following include to pull this content into a doc file: +//// include::../../libbeat/docs/setup-repositories.asciidoc[] +////////////////////////////////////////////////////////////////////////// + [[setup-repositories]] === Repositories for APT and YUM @@ -52,18 +63,18 @@ Simply delete the `deb-src` entry from the `/etc/apt/sources.list` file, and the ================================================== . Run `apt-get update`, and the repository is ready for use. For example, you can -install Filebeat by running: +install {beatname_uc} by running: + -[source,sh] +["source","sh",subs="attributes"] -------------------------------------------------- -sudo apt-get update && sudo apt-get install filebeat +sudo apt-get update && sudo apt-get install {beatname_lc} -------------------------------------------------- -. To configure the beat to start automatically during boot, run: +. 
To configure the Beat to start automatically during boot, run: + -[source,sh] +["source","sh",subs="attributes"] -------------------------------------------------- -sudo update-rc.d filebeat defaults 95 10 +sudo update-rc.d {beatname_lc} defaults 95 10 -------------------------------------------------- [float] @@ -93,17 +104,17 @@ autorefresh=1 type=rpm-md -------------------------------------------------- + -Your repository is ready to use. For example, you can install Filebeat by +Your repository is ready to use. For example, you can install {beatname_uc} by running: + -[source,sh] +["source","sh",subs="attributes"] -------------------------------------------------- -sudo yum install filebeat +sudo yum install {beatname_lc} -------------------------------------------------- -. To configure the beat to start automatically during boot, run: +. To configure the Beat to start automatically during boot, run: + -[source,sh] +["source","sh",subs="attributes"] -------------------------------------------------- -sudo chkconfig --add filebeat +sudo chkconfig --add {beatname_lc} -------------------------------------------------- diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-template-load.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-template-load.asciidoc index 8df3cbc3..78462f86 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-template-load.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-template-load.asciidoc @@ -74,7 +74,7 @@ curl -XPUT 'http://localhost:9200/_template/{beatname_lc}' -d@/etc/{beatname_lc} ["source","sh",subs="attributes,callouts"] ---------------------------------------------------------------------- -cd {beatname_lc}-{version}-darwin +cd {beatname_lc}-{version}-darwin-x86_64 curl -XPUT 'http://localhost:9200/_template/{beatname_lc}' -d@{beatname_lc}.template.json ---------------------------------------------------------------------- diff --git a/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc index ec0507ba..3ba132e0 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc @@ -1,3 +1,3 @@ -:stack-version: 5.1.1 -:doc-branch: 5.1 -:go-version: 1.7.1 +:stack-version: 5.2.1 +:doc-branch: 5.2 +:go-version: 1.7.4 diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client_integration_test.go b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client_integration_test.go index 15683169..6d81a547 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client_integration_test.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client_integration_test.go @@ -35,20 +35,23 @@ func TestCheckTemplate(t *testing.T) { func TestLoadTemplate(t *testing.T) { + // Setup ES + client := GetTestingElasticsearch() + err := client.Connect(5 * time.Second) + assert.Nil(t, err) + // Load template absPath, err := filepath.Abs("../../tests/files/") assert.NotNil(t, absPath) assert.Nil(t, err) templatePath := absPath + "/template.json" + if strings.HasPrefix(client.Connection.version, "2.") { + templatePath = absPath + "/template-es2x.json" + } content, err := readTemplate(templatePath) assert.Nil(t, err) - // Setup ES - client := GetTestingElasticsearch() - err = client.Connect(5 * time.Second) - assert.Nil(t, err) - templateName := "testbeat" // Load template diff --git 
a/vendor/github.com/elastic/beats/libbeat/outputs/kafka/kafka.go b/vendor/github.com/elastic/beats/libbeat/outputs/kafka/kafka.go index ca2227cc..69795acd 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/kafka/kafka.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/kafka/kafka.go @@ -91,7 +91,9 @@ var ( "0.10.0.0": sarama.V0_10_0_0, "0.10.0.1": sarama.V0_10_0_1, "0.10.0": sarama.V0_10_0_1, - "0.10": sarama.V0_10_0_1, + "0.10.1.0": sarama.V0_10_1_0, + "0.10.1": sarama.V0_10_1_0, + "0.10": sarama.V0_10_1_0, } ) diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/outil/select_test.go b/vendor/github.com/elastic/beats/libbeat/outputs/outil/select_test.go index 369c1c83..41a1ddbf 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/outil/select_test.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/outil/select_test.go @@ -1,6 +1,7 @@ package outil import ( + "strings" "testing" "github.com/elastic/beats/libbeat/common" @@ -49,24 +50,24 @@ func TestSelector(t *testing.T) { { "missing format string key with default in rule", `keys: - - key: '%{[key]}' - default: value`, + - key: '%{[key]}' + default: value`, common.MapStr{}, "value", }, { "empty format string key with default in rule", `keys: - - key: '%{[key]}' - default: value`, + - key: '%{[key]}' + default: value`, common.MapStr{"key": ""}, "value", }, { "missing format string key with constant in next rule", `keys: - - key: '%{[key]}' - - key: value`, + - key: '%{[key]}' + - key: value`, common.MapStr{}, "value", }, @@ -79,83 +80,83 @@ func TestSelector(t *testing.T) { { "apply mapping", `keys: - - key: '%{[key]}' - mappings: - v: value`, + - key: '%{[key]}' + mappings: + v: value`, common.MapStr{"key": "v"}, "value", }, { "apply mapping with default on empty key", `keys: - - key: '%{[key]}' - default: value - mappings: - v: 'v'`, + - key: '%{[key]}' + default: value + mappings: + v: 'v'`, common.MapStr{"key": ""}, "value", }, { "apply mapping with default on empty lookup", `keys: - - key: '%{[key]}' - default: value - mappings: - v: ''`, + - key: '%{[key]}' + default: value + mappings: + v: ''`, common.MapStr{"key": "v"}, "value", }, { "apply mapping without match", `keys: - - key: '%{[key]}' - mappings: - v: '' - - key: value`, + - key: '%{[key]}' + mappings: + v: '' + - key: value`, common.MapStr{"key": "x"}, "value", }, { "mapping with constant key", `keys: - - key: k - mappings: - k: value`, + - key: k + mappings: + k: value`, common.MapStr{}, "value", }, { "mapping with missing constant key", `keys: - - key: unknown - mappings: {k: wrong} - - key: value`, + - key: unknown + mappings: {k: wrong} + - key: value`, common.MapStr{}, "value", }, { "mapping with missing constant key, but default", `keys: - - key: unknown - default: value - mappings: {k: wrong}`, + - key: unknown + default: value + mappings: {k: wrong}`, common.MapStr{}, "value", }, { "matching condition", `keys: - - key: value - when.equals.test: test`, + - key: value + when.equals.test: test`, common.MapStr{"test": "test"}, "value", }, { "failing condition", `keys: - - key: wrong - when.equals.test: test - - key: value`, + - key: wrong + when.equals.test: test + - key: value`, common.MapStr{"test": "x"}, "value", }, @@ -164,9 +165,10 @@ func TestSelector(t *testing.T) { for i, test := range tests { t.Logf("run (%v): %v", i, test.title) - cfg, err := common.NewConfigWithYAML([]byte(test.config), "test") + yaml := strings.Replace(test.config, "\t", " ", -1) + cfg, err := common.NewConfigWithYAML([]byte(yaml), "test") if err != nil { 
- t.Error(err) + t.Errorf("YAML parse error: %v\n%v", err, yaml) continue } diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/tls.go b/vendor/github.com/elastic/beats/libbeat/outputs/tls.go index d8915f47..79fd297a 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/tls.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/tls.go @@ -269,12 +269,7 @@ func loadCertificateAuthorities(CAs []string) (*x509.CertPool, []error) { return roots, errors } -func (cs *tlsCipherSuite) Unpack(in interface{}) error { - s, ok := in.(string) - if !ok { - return fmt.Errorf("tls cipher suite must be an identifier") - } - +func (cs *tlsCipherSuite) Unpack(s string) error { suite, found := tlsCipherSuites[s] if !found { return fmt.Errorf("invalid tls cipher suite '%v'", s) @@ -284,12 +279,7 @@ func (cs *tlsCipherSuite) Unpack(in interface{}) error { return nil } -func (ct *tlsCurveType) Unpack(in interface{}) error { - s, ok := in.(string) - if !ok { - return fmt.Errorf("tls curve type must be an identifier") - } - +func (ct *tlsCurveType) Unpack(s string) error { t, found := tlsCurveTypes[s] if !found { return fmt.Errorf("invalid tls curve type '%v'", s) @@ -297,5 +287,4 @@ func (ct *tlsCurveType) Unpack(in interface{}) error { *ct = t return nil - } diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/transport/tls.go b/vendor/github.com/elastic/beats/libbeat/outputs/transport/tls.go index ef6dc9b0..fff921cc 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/transport/tls.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/transport/tls.go @@ -234,12 +234,7 @@ func (v TLSVersion) String() string { return "unknown" } -func (v *TLSVersion) Unpack(in interface{}) error { - s, ok := in.(string) - if !ok { - return fmt.Errorf("tls version must be an identifier") - } - +func (v *TLSVersion) Unpack(s string) error { version, found := tlsProtocolVersions[s] if !found { return fmt.Errorf("invalid tls version '%v'", s) diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields.go b/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields.go index 7c05f56a..772c481c 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields.go @@ -14,15 +14,19 @@ import ( ) type decodeJSONFields struct { - fields []string - maxDepth int - processArray bool + fields []string + maxDepth int + overwriteKeys bool + processArray bool + target *string } type config struct { - Fields []string `config:"fields"` - MaxDepth int `config:"max_depth" validate:"min=1"` - ProcessArray bool `config:"process_array"` + Fields []string `config:"fields"` + MaxDepth int `config:"max_depth" validate:"min=1"` + OverwriteKeys bool `config:"overwrite_keys"` + ProcessArray bool `config:"process_array"` + Target *string `config:"target"` } var ( @@ -38,7 +42,7 @@ func init() { processors.RegisterPlugin("decode_json_fields", configChecked(newDecodeJSONFields, requireFields("fields"), - allowedFields("fields", "max_depth", "process_array"))) + allowedFields("fields", "max_depth", "overwrite_keys", "process_array", "target", "when"))) } func newDecodeJSONFields(c common.Config) (processors.Processor, error) { @@ -51,7 +55,7 @@ func newDecodeJSONFields(c common.Config) (processors.Processor, error) { return nil, fmt.Errorf("fail to unpack the decode_json_fields configuration: %s", err) } - f := decodeJSONFields{fields: config.Fields, maxDepth: 
config.MaxDepth, processArray: config.ProcessArray} + f := decodeJSONFields{fields: config.Fields, maxDepth: config.MaxDepth, overwriteKeys: config.OverwriteKeys, processArray: config.ProcessArray, target: config.Target} return f, nil } @@ -75,7 +79,21 @@ func (f decodeJSONFields) Run(event common.MapStr) (common.MapStr, error) { continue } - _, err = event.Put(field, output) + if f.target != nil { + if len(*f.target) > 0 { + _, err = event.Put(*f.target, output) + } else { + switch t := output.(type) { + default: + errs = append(errs, errors.New("Error trying to add target to root.").Error()) + case map[string]interface{}: + jsontransform.WriteJSONKeys(event, t, f.overwriteKeys, "json_error") + } + } + } else { + _, err = event.Put(field, output) + } + if err != nil { debug("Error trying to Put value %v for field : %s", output, field) errs = append(errs, err.Error()) diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields_test.go b/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields_test.go index f60e94d3..053f5279 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields_test.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields_test.go @@ -111,6 +111,64 @@ func TestValidJSONDepthTwo(t *testing.T) { } +func TestTargetOption(t *testing.T) { + input := common.MapStr{ + "msg": "{\"log\":\"{\\\"level\\\":\\\"info\\\"}\",\"stream\":\"stderr\",\"count\":3}", + "pipeline": "us1", + } + + testConfig, _ = common.NewConfigFrom(map[string]interface{}{ + "fields": fields, + "process_array": false, + "max_depth": 2, + "target": "doc", + }) + + actual := getActualValue(t, testConfig, input) + + expected := common.MapStr{ + "doc": map[string]interface{}{ + "log": map[string]interface{}{ + "level": "info", + }, + "stream": "stderr", + "count": 3, + }, + "msg": "{\"log\":\"{\\\"level\\\":\\\"info\\\"}\",\"stream\":\"stderr\",\"count\":3}", + "pipeline": "us1", + } + + assert.Equal(t, expected.String(), actual.String()) +} + +func TestTargetRootOption(t *testing.T) { + input := common.MapStr{ + "msg": "{\"log\":\"{\\\"level\\\":\\\"info\\\"}\",\"stream\":\"stderr\",\"count\":3}", + "pipeline": "us1", + } + + testConfig, _ = common.NewConfigFrom(map[string]interface{}{ + "fields": fields, + "process_array": false, + "max_depth": 2, + "target": "", + }) + + actual := getActualValue(t, testConfig, input) + + expected := common.MapStr{ + "log": map[string]interface{}{ + "level": "info", + }, + "stream": "stderr", + "count": 3, + "msg": "{\"log\":\"{\\\"level\\\":\\\"info\\\"}\",\"stream\":\"stderr\",\"count\":3}", + "pipeline": "us1", + } + + assert.Equal(t, expected.String(), actual.String()) +} + func getActualValue(t *testing.T, config *common.Config, input common.MapStr) common.MapStr { if testing.Verbose() { logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"*"}) diff --git a/vendor/github.com/elastic/beats/libbeat/scripts/Makefile b/vendor/github.com/elastic/beats/libbeat/scripts/Makefile index 2f79609d..482060d5 100755 --- a/vendor/github.com/elastic/beats/libbeat/scripts/Makefile +++ b/vendor/github.com/elastic/beats/libbeat/scripts/Makefile @@ -1,11 +1,15 @@ - ### VARIABLE SETUP ### - -BEATNAME?=libbeat -BEAT_DESCRIPTION?=Sends events to Elasticsearch or Logstash +### Application using libbeat may override the following variables in their Makefile +BEATNAME?=libbeat ## @packaging Name of the application +BEAT_DESCRIPTION?=Sends events to Elasticsearch or Logstash ## 
@packaging Description of the application +BEAT_VENDOR?=Elastic ## @packaging Name of the vendor of the application +BEAT_LICENSE?=ASL 2.0 ## @packaging Software license of the application +BEAT_DOC_URL?=https://www.elastic.co/guide/en/beats/${BEATNAME}/current/index.html ## @packaging Link to the user documentation of the application BEAT_DIR?=github.com/elastic/beats/${BEATNAME} -ES_BEATS?=.. -GOPACKAGES?=${BEAT_DIR}/... +ES_BEATS?=..## @community_beat Must be set to ./vendor/github.com/elastic/beats +GOPACKAGES?=${BEAT_DIR}/...## @community_beat Must be set to $(shell glide novendor) +PACKER_TEMPLATES_DIR?=${ES_BEATS}/dev-tools/packer ## @Building Directory of templates that are used by "make package" +NOTICE_FILE?=../NOTICE # Makefile for a custom beat that includes this libbeat/scripts/Makefile: # if glide is used to manage vendor dependencies, @@ -39,25 +43,27 @@ COVERAGE_TOOL=${GOPATH}/bin/gotestcover COVERAGE_TOOL_REPO=github.com/elastic/beats/vendor/github.com/pierrre/gotestcover PROCESSES?= 4 TIMEOUT?= 90 -TEST_ENVIRONMENT?=false -SYSTEM_TESTS?=false -GOX_OS?=linux darwin windows solaris freebsd netbsd openbsd -TESTING_ENVIRONMENT?=snapshot -DOCKER_COMPOSE_PROJECT_NAME?=${BEATNAME}_${TESTING_ENVIRONMENT} -DOCKER_COMPOSE?=docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} -f ${PWD}/../testing/environments/base.yml -f ${PWD}/../testing/environments/${TESTING_ENVIRONMENT}.yml -f docker-compose.yml -DOCKER_CACHE?=1 # If set to 0, all docker images are created without cache +TEST_ENVIRONMENT?=false ## @testing if true, "make testsuite" runs integration tests and system tests in a dockerized test environment +SYSTEM_TESTS?=false ## @testing if true, "make test" and "make testsuite" run unit tests and system tests +GOX_OS?=linux darwin windows solaris freebsd netbsd openbsd ## @Building List of all OS to be supported by "make crosscompile". +TESTING_ENVIRONMENT?=snapshot ## @testing The name of the environment under test +DOCKER_COMPOSE_PROJECT_NAME?=${BEATNAME}_${TESTING_ENVIRONMENT} ## @testing The name of the docker-compose project used by the integration and system tests +DOCKER_COMPOSE?=TESTING_ENVIRONMENT=${TESTING_ENVIRONMENT} docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} -f docker-compose.yml +DOCKER_CACHE?=1 ## @miscellaneous If set to 0, all docker images are created without cache GOPACKAGES_COMMA_SEP=$(subst $(space),$(comma),$(strip ${GOPACKAGES})) PYTHON_ENV?=${BUILD_DIR}/python-env -BUILDID?=$(shell git rev-parse HEAD) - -CGO?=false +BUILDID?=$(shell git rev-parse HEAD) ## @Building The build ID +VIRTUALENV_PARAMS?= +INTEGRATION_TESTS?= +CGO?=false ## @building if true, Build with C Go support # Cross compiling targets -TARGETS?="linux/amd64 linux/386 windows/amd64 windows/386 darwin/amd64" -# Cross compiling targets to be build on debian 6. 
This only applies if CGO is enabled -TARGETS_OLD?="" -PACKAGES?=${BEATNAME}/deb ${BEATNAME}/rpm ${BEATNAME}/darwin ${BEATNAME}/win ${BEATNAME}/bin -SNAPSHOT?=yes +TARGETS?="linux/amd64 linux/386 windows/amd64 windows/386 darwin/amd64" ## @building list of platforms/architecture to be built by "make package" +TARGETS_OLD?="" ## @building list of Debian6 architecture to be built by "make package" when CGO is true +PACKAGES?=${BEATNAME}/deb ${BEATNAME}/rpm ${BEATNAME}/darwin ${BEATNAME}/win ${BEATNAME}/bin ## @Building List of OS to be supported by "make package" +SNAPSHOT?=yes ## @Building If yes, builds a snapshot version +BEATS_BUILDER_IMAGE?=tudorg/beats-builder ## @Building Name of the docker image to use when packaging the application +BEATS_BUILDER_DEB6_IMAGE?=tudorg/beats-builder-deb6 ## @Building Name of the docker image to use when packaging the application for Debian 6 ifeq ($(DOCKER_CACHE),0) DOCKER_NOCACHE=--no-cache @@ -71,49 +77,41 @@ endif ### BUILDING ### -# Builds beat -${BEATNAME}: $(GOFILES_ALL) + +${BEATNAME}: $(GOFILES_ALL) ## @build build the beat application go build # Create test coverage binary ${BEATNAME}.test: $(GOFILES_ALL) go test $(RACE) -c -coverpkg ${GOPACKAGES_COMMA_SEP} -# Cross-compile beat for the OS'es specified in GOX_OS variable. -# The binaries are placed in the build/bin directory. .PHONY: crosscompile +crosscompile: ## @build Cross-compile beat for the OS'es specified in GOX_OS variable. The binaries are placed in the build/bin directory. crosscompile: $(GOFILES) go get github.com/mitchellh/gox mkdir -p ${BUILD_DIR}/bin gox -output="${BUILD_DIR}/bin/{{.Dir}}-{{.OS}}-{{.Arch}}" -os="${GOX_OS}" ${GOX_FLAGS} -# Checks project and source code if everything is according to standard + .PHONY: check -check: +check: ## @build Checks project and source code if everything is according to standard @gofmt -l ${GOFILES_NOVENDOR} | (! grep . -q) || (echo "Code differs from gofmt's style" && false) go vet ${GOPACKAGES} -# Runs gofmt -w on the project's source code, modifying any files that do not -# match its style. .PHONY: fmt -fmt: +fmt: ## @build Runs gofmt -w on the project's source code, modifying any files that do not match its style. gofmt -l -w ${GOFILES_NOVENDOR} -# Runs gofmt -s -w on the project's source code, modifying any files that do not -# match its style. .PHONY: simplify -simplify: +simplify: ## @build Runs gofmt -s -w on the project's source code, modifying any files that do not match its style. gofmt -l -s -w ${GOFILES_NOVENDOR} -# Cleans up directory and source code with gofmt .PHONY: clean -clean: +clean:: ## @build Cleans up all files generated by the build steps rm -rf build ${BEATNAME} ${BEATNAME}.test ${BEATNAME}.exe ${BEATNAME}.test.exe _meta/fields.generated.yml -# Shortcut for continuous integration -# This should always run before merging. .PHONY: ci -ci: +ci: ## @build Shortcut for continuous integration. This should always run before merging. $(MAKE) $(MAKE) check $(MAKE) testsuite @@ -121,7 +119,6 @@ ci: ### Testing ### # Unless stated otherwise, all tests are always run with coverage reporting enabled. - # Prepration for tests .PHONY: prepare-tests prepare-tests: @@ -129,58 +126,60 @@ prepare-tests: # gotestcover is needed to fetch coverage for multiple packages go get ${COVERAGE_TOOL_REPO} -# Runs the unit tests with coverage -# Race is not enabled for unit tests because tests run much slower. .PHONY: unit-tests +unit-tests: ## @testing Runs the unit tests with coverage. 
Race is not enabled for unit tests because tests run much slower. unit-tests: prepare-tests $(COVERAGE_TOOL) $(RACE) -coverprofile=${COVERAGE_DIR}/unit.cov ${GOPACKAGES} -# Runs the unit tests without coverage reports. .PHONY: unit -unit: +unit: ## @testing Runs the unit tests without coverage reports. go test $(RACE) ${GOPACKAGES} -# Run integration tests. Unit tests are run as part of the integration tests. .PHONY: integration-tests +integration-tests: ## @testing Run integration tests. Unit tests are run as part of the integration tests. integration-tests: prepare-tests $(COVERAGE_TOOL) -tags=integration $(RACE) -coverprofile=${COVERAGE_DIR}/integration.cov ${GOPACKAGES} -# Runs the integration inside a virtual environment. This can be run on any docker-machine (local, remote) +# .PHONY: integration-tests-environment +integration-tests-environment: ## @testing Runs the integration tests inside a virtual environment. This can be run on any docker-machine (local, remote) integration-tests-environment: prepare-tests build-image ${DOCKER_COMPOSE} run beat make integration-tests RACE_DETECTOR=$(RACE_DETECTOR) # Runs the system tests .PHONY: system-tests -system-tests: ${BEATNAME}.test prepare-tests python-env - . ${PYTHON_ENV}/bin/activate; INTEGRATION_TESTS=1 nosetests -w tests/system --process-timeout=$(TIMEOUT) --with-timer +system-tests: ## @testing Runs the system tests +system-tests: ${BEATNAME}.test prepare-tests python-env ${ES_BEATS}/libbeat/dashboards/import_dashboards + . ${PYTHON_ENV}/bin/activate; INTEGRATION_TESTS=${INTEGRATION_TESTS} nosetests -w tests/system --process-timeout=$(TIMEOUT) --with-timer python ${ES_BEATS}/dev-tools/aggregate_coverage.py -o ${COVERAGE_DIR}/system.cov ./build/system-tests/run # Runs the system tests .PHONY: system-tests-environment +system-tests-environment: ## @testing Runs the system tests inside a virtual environment. This can be run on any docker-machine (local, remote) system-tests-environment: prepare-tests build-image - ${DOCKER_COMPOSE} run beat make system-tests + ${DOCKER_COMPOSE} run -e INTEGRATION_TESTS=1 beat make system-tests + -# Runs system tests without coverage reports and in parallel .PHONY: fast-system-tests +fast-system-tests: ## @testing Runs system tests without coverage reports and in parallel fast-system-tests: ${BEATNAME}.test python-env . ${PYTHON_ENV}/bin/activate; nosetests -w tests/system --processes=$(PROCESSES) --process-timeout=$(TIMEOUT) # Run benchmark tests .PHONY: benchmark-tests -benchmark-tests: +benchmark-tests: ## @testing Runs benchmarks (NOT YET IMPLEMENTED) # No benchmark tests exist so far #go test -bench=. ${GOPACKAGES} # Run load tests .PHONY: load-tests -load-tests: +load-tests: ## @testing Runs load tests . ${PYTHON_ENV}/bin/activate; LOAD_TESTS=1 nosetests -w tests/system --processes=$(PROCESSES) --process-timeout=$(TIMEOUT) -a 'load' # Sets up the virtual python environment .PHONY: python-env python-env: ${ES_BEATS}/libbeat/tests/system/requirements.txt - @test -d ${PYTHON_ENV} || virtualenv ${PYTHON_ENV} + @test -d ${PYTHON_ENV} || virtualenv ${VIRTUALENV_PARAMS} ${PYTHON_ENV} @. ${PYTHON_ENV}/bin/activate && pip install -q --upgrade pip ; \ if [ -a ./tests/system/requirements.txt ] && [ ! ${ES_BEATS}/libbeat/tests/system/requirements.txt -ef ./tests/system/requirements.txt ] ; then \ .
${PYTHON_ENV}/bin/activate && pip install -qUr ${ES_BEATS}/libbeat/tests/system/requirements.txt -Ur ./tests/system/requirements.txt ; \ @@ -188,30 +187,30 @@ python-env: ${ES_BEATS}/libbeat/tests/system/requirements.txt . ${PYTHON_ENV}/bin/activate && pip install -qUr ${ES_BEATS}/libbeat/tests/system/requirements.txt ; \ fi -# Runs unit and system tests without coverage reports + .PHONY: test +test: ## @testing Runs unit and system tests without coverage reports test: unit if [ $(SYSTEM_TESTS) = true ]; then \ $(MAKE) fast-system-tests; \ fi -# Runs all tests and generates the coverage reports .PHONY: testsuite +testsuite: ## @testing Runs all tests and generates the coverage reports testsuite: clean collect $(MAKE) unit-tests # Setups environment if TEST_ENVIRONMENT is set to true + # Only runs integration tests with test environment if [ $(TEST_ENVIRONMENT) = true ]; then \ $(MAKE) integration-tests-environment; \ fi - # Runs system tests if SYSTEM_TESTS is set to true + # Runs system and system integration tests if SYSTEM_TESTS is set to true if [ $(SYSTEM_TESTS) = true ]; then \ if [ $(TEST_ENVIRONMENT) = true ]; then \ - $(MAKE) system-tests-environment; \ - else \ + $(MAKE) system-tests-environment; \ + else \ $(MAKE) system-tests; \ fi \ fi @@ -219,6 +218,10 @@ testsuite: clean collect $(MAKE) benchmark-tests $(MAKE) coverage-report + if [ $(TEST_ENVIRONMENT) = true ]; then \ + $(MAKE) fix-permissions; \ + fi + # Generates a coverage report from the existing coverage files .PHONY: coverage-report coverage-report: @@ -228,8 +231,9 @@ coverage-report: test ! -s ${COVERAGE_DIR}/system.cov || go tool cover -html=${COVERAGE_DIR}/system.cov -o ${COVERAGE_DIR}/system.html test ! -s ${COVERAGE_DIR}/unit.cov || go tool cover -html=${COVERAGE_DIR}/unit.cov -o ${COVERAGE_DIR}/unit.html -# Update expects the most recent version of libbeat in the GOPATH + .PHONY: update +update: ## @build Update expects the most recent version of libbeat in the GOPATH update: python-env collect # Update config @@ -255,24 +259,19 @@ update: python-env collect . ${PYTHON_ENV}/bin/activate && python ${ES_BEATS}/libbeat/scripts/generate_template.py $(PWD) ${BEATNAME} ${ES_BEATS} . ${PYTHON_ENV}/bin/activate && python ${ES_BEATS}/libbeat/scripts/generate_template.py --es2x $(PWD) ${BEATNAME} ${ES_BEATS} - # Update docs version - cp ${ES_BEATS}/libbeat/docs/version.asciidoc docs/version.asciidoc - # Generate index-pattern echo "Generate index pattern" -rm -f $(PWD)/_meta/kibana/index-pattern/${BEATNAME}.json mkdir -p $(PWD)/_meta/kibana/index-pattern . ${PYTHON_ENV}/bin/activate && python ${ES_BEATS}/libbeat/scripts/generate_index_pattern.py --index '${BEATNAME}-*' --libbeat ${ES_BEATS}/libbeat --beat $(PWD) - -# Builds the documents for the beat .PHONY: docs -docs: +docs: ## @build Builds the documents for the beat sh ${ES_BEATS}/libbeat/scripts/build_docs.sh ${BEATNAME} -# Preview the documents for the beat in the browser + .PHONY: docs-preview -docs-preview: +docs-preview: ## @build Preview the documents for the beat in the browser if [ ! -d "build/docs" ]; then $(MAKE) docs; fi; ${BUILD_DIR}/docs/build_docs.pl --chunk=1 -open chunk=1 -open --doc ${GOPATH}/src/github.com/elastic/beats/${BEATNAME}/docs/index.asciidoc -out ${BUILD_DIR}/html_docs @@ -284,9 +283,11 @@ ES_URL?=http://localhost:9200 export-dashboards: python-env update .
${PYTHON_ENV}/bin/activate && python ${ES_BEATS}/dev-tools/export_dashboards.py --url ${ES_URL} --dir $(shell pwd)/_meta/kibana --regex ${BEATNAME}-* +${ES_BEATS}/libbeat/dashboards/import_dashboards: + $(MAKE) -C ${ES_BEATS}/libbeat/dashboards import_dashboards + .PHONY: import-dashboards -import-dashboards: update - $(MAKE) -C ${ES_BEATS}/libbeat/dashboards import_dasboards +import-dashboards: update ${ES_BEATS}/libbeat/dashboards/import_dashboards ${ES_BEATS}/libbeat/dashboards/import_dashboards -es ${ES_URL} -dir ${PWD}/_meta/kibana ### CONTAINER ENVIRONMENT #### @@ -329,6 +330,9 @@ HOME_PREFIX?=/tmp/${BEATNAME} install-home: install -d -m 755 ${HOME_PREFIX}/scripts/ install -m 755 ${ES_BEATS}/libbeat/scripts/migrate_beat_config_1_x_to_5_0.py ${HOME_PREFIX}/scripts/ + if [ -a ${NOTICE_FILE} ]; then \ + install -m 644 ${NOTICE_FILE} ${HOME_PREFIX}/; \ + fi # Prepares for packaging. Builds binaries and creates homedir data .PHONY: prepare-package @@ -336,6 +340,7 @@ prepare-package: # cross compile on ubuntu docker run --rm \ -v $(abspath ${ES_BEATS}/dev-tools/packer/xgo-scripts):/scripts \ + -v $(abspath ${PACKER_TEMPLATES_DIR}):/templates \ -v $(abspath ../):/source \ -v $(BUILD_DIR):/build \ -e PUREGO="yes" \ @@ -344,7 +349,7 @@ prepare-package: -e SOURCE=/source \ -e TARGETS=${TARGETS} \ -e BUILDID=${BUILDID} \ - tudorg/beats-builder \ + ${BEATS_BUILDER_IMAGE} \ ${BEAT_DIR} # Prepares for packaging. Builds binaries with cgo @@ -354,6 +359,7 @@ prepare-package-cgo: # cross compile on ubuntu docker run --rm \ -v $(abspath ${ES_BEATS}/dev-tools/packer/xgo-scripts):/scripts \ + -v $(abspath ${PACKER_TEMPLATES_DIR}):/templates \ -v $(abspath ../):/source \ -v $(BUILD_DIR):/build \ -e PACK=${BEATNAME} \ @@ -361,20 +367,21 @@ prepare-package-cgo: -e SOURCE=/source \ -e TARGETS=${TARGETS} \ -e BUILDID=${BUILDID} \ - tudorg/beats-builder \ + ${BEATS_BUILDER_IMAGE} \ ${BEAT_DIR} # linux builds on debian 6 for compatibility docker run --rm \ -v ${BUILD_DIR}:/build \ -v $(abspath ${ES_BEATS}/dev-tools/packer/xgo-scripts):/scripts \ + -v $(abspath ${PACKER_TEMPLATES_DIR}):/templates \ -v $(abspath ..):/source \ -e PACK=${BEATNAME} \ -e BEFORE_BUILD=before_build.sh \ -e SOURCE=/source \ -e TARGETS=${TARGETS_OLD} \ -e BUILDID=${BUILDID} \ - tudorg/beats-builder-deb6 \ + ${BEATS_BUILDER_DEB6_IMAGE} \ ${BEAT_DIR} # Prepares images for packaging @@ -382,8 +389,8 @@ prepare-package-cgo: package-setup: $(MAKE) -C ${ES_BEATS}/dev-tools/packer deps images -# Create binary packages for the beat. .PHONY: package +package: ## @packaging Create binary packages for the beat. 
package: package-setup echo "Start building packages for ${BEATNAME}" @@ -395,7 +402,15 @@ package: package-setup echo "beat_url: https://${BEAT_DIR}" >> ${BUILD_DIR}/package.yml echo "beat_repo: ${BEAT_DIR}" >> ${BUILD_DIR}/package.yml echo "beat_description: ${BEAT_DESCRIPTION}" >> ${BUILD_DIR}/package.yml - cat ${ES_BEATS}/dev-tools/packer/version.yml >> ${BUILD_DIR}/package.yml + echo "beat_vendor: ${BEAT_VENDOR}" >> ${BUILD_DIR}/package.yml + echo "beat_license: ${BEAT_LICENSE}" >> ${BUILD_DIR}/package.yml + echo "beat_doc_url: ${BEAT_DOC_URL}" >> ${BUILD_DIR}/package.yml + + if [ -a version.yml ]; then \ + cat version.yml >> ${BUILD_DIR}/package.yml; \ + else \ + cat ${ES_BEATS}/dev-tools/packer/version.yml >> ${BUILD_DIR}/package.yml; \ + fi if [ $(CGO) = true ]; then \ $(MAKE) prepare-package-cgo; \ @@ -404,7 +419,7 @@ package: package-setup fi SNAPSHOT=${SNAPSHOT} BUILDID=${BUILDID} BEAT_DIR=${BEAT_DIR} BUILD_DIR=${BUILD_DIR} $(MAKE) -C ${ES_BEATS}/dev-tools/packer ${PACKAGES} ${BUILD_DIR}/upload/build_id.txt - + $(MAKE) fix-permissions echo "Finished packages for ${BEATNAME}" package-dashboards: package-setup @@ -412,3 +427,19 @@ package-dashboards: package-setup cp -r _meta/kibana ${BUILD_DIR}/dashboards # build the dashboards package BEATNAME=${BEATNAME} BUILD_DIR=${BUILD_DIR} SNAPSHOT=$(SNAPSHOT) $(MAKE) -C ${ES_BEATS}/dev-tools/packer package-dashboards ${shell pwd}/build/upload/build_id.txt + +fix-permissions: + # Change ownership of all files inside /build folder from root/root to current user/group + docker run -v ${BUILD_DIR}:/build alpine:3.4 sh -c "chown -R $(shell id -u):$(shell id -g) /build" + +set_version: ## @packaging VERSION=x.y.z set the version of the beat to x.y.z + ${ES_BEATS}/dev-tools/set_version ${VERSION} + +get_version: ## @packaging get the version of the beat. + @${ES_BEATS}/dev-tools/get_version + +help: ## @help Show this help. + @python ${ES_BEATS}/libbeat/scripts/generate_makefile_doc.py $(MAKEFILE_LIST) + +help_variables: ## @help Show Makefile customizable variables. + @python ${ES_BEATS}/libbeat/scripts/generate_makefile_doc.py --variables $(MAKEFILE_LIST) diff --git a/vendor/github.com/elastic/beats/libbeat/scripts/docker-entrypoint.sh b/vendor/github.com/elastic/beats/libbeat/scripts/docker-entrypoint.sh index 32b0c280..44e7c2cd 100755 --- a/vendor/github.com/elastic/beats/libbeat/scripts/docker-entrypoint.sh +++ b/vendor/github.com/elastic/beats/libbeat/scripts/docker-entrypoint.sh @@ -81,7 +81,7 @@ waitFor() { done echo - echo >&2 '${3} is not available' + echo >&2 "${3} is not available" echo >&2 "Address: ${1}:${2}" } diff --git a/vendor/github.com/elastic/beats/libbeat/scripts/generate_index_pattern.py b/vendor/github.com/elastic/beats/libbeat/scripts/generate_index_pattern.py index 8bd2355f..6efa6d78 100644 --- a/vendor/github.com/elastic/beats/libbeat/scripts/generate_index_pattern.py +++ b/vendor/github.com/elastic/beats/libbeat/scripts/generate_index_pattern.py @@ -35,7 +35,7 @@ def field_to_json(desc, path, output): global unique_fields if path in unique_fields: - print "ERROR: Field", path, "is duplicated. Please delete it and try again. Fields already are", unique_fields + print("ERROR: Field {} is duplicated. Please delete it and try again. 
Fields already are {}".format(path, ", ".join(unique_fields))) sys.exit(1) else: unique_fields.append(path) diff --git a/vendor/github.com/elastic/beats/libbeat/scripts/generate_makefile_doc.py b/vendor/github.com/elastic/beats/libbeat/scripts/generate_makefile_doc.py new file mode 100644 index 00000000..0295512e --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/scripts/generate_makefile_doc.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python + +""" +This script generates and output a documentation from a list of Makefile files + +Example usage: + + python generate_makefile_doc.py Makefile1 Makefile2 ... +""" + +import argparse +import re + + +# Parse a Makefile target line: +# +# Example 1: +# unit: ## @testing Runs the unit tests without coverage reports. +# name => unit +# varname => None +# category => testing +# doc => Runs the unit tests without coverage reports. +# +# Example 2: +# ${BEATNAME}: $(GOFILES_ALL) ## @build build the beat application +# name => None +# varname => BEATNAME +# category => testing +# doc => Runs the unit tests without coverage reports. +regexp_target_doc = re.compile(r'^((?P(-|_|\w)+)|(\${(?P(-|_|\w)+)}))\s*:.*\#\#+\s*@(?P(\w+))\s+(?P(.*))') + + + +# Parse a Makefile variable assignement: +# +# Example 1: +# BEAT_LICENSE?=ASL 2.0 ## @packaging Software license of the application +# name => BEAT_LICENSE +# default => ASL 2.0 +# category => packaging +# doc => Software license of the application +# +## Example 2: +# BEATNAME?=filebeat +# name => BEATNAME +# default => libbeat +# category => None +# doc => None +# +regexp_var_help = re.compile(r'^(?P(\w)+)\s*(\?)?=\s*(?P([^\#]+))(\s+\#\#+\s*@(?P(\w+))(:)?\s+(?P(.*))|\s*$)') + + +# Parse a Makefile line according to the given regexp +# - insert the dict { name, default, is_variable, category, doc} to the categories dictionary +# - insert the category to the categories_set +# - return a pair [name, value] if the line is a Makefile variable assignement +def parse_line(line, regexp, categories, categories_set): + matches = regexp.match(line) + variable = None + if matches: + name = None + variable = False + try: + name = matches.group("varname") + is_variable = True + except: + pass + try: + default = matches.group("default").strip() + except: + default = "" + + if not name: + name = matches.group("name") + is_variable = False + + if name: + variable = [name, default] + + category = matches.group("category") + if category: + category = category.replace("_", " ").capitalize() + doc = matches.group("doc").rstrip('.').rstrip() + doc = doc[0].capitalize() + doc[1:] # Capitalize the first word + + if category not in categories_set: + categories_set.append(category) + categories[category] = [] + + categories[category].append({ + "name": name, + "doc": doc, + "is_variable": is_variable, + "default": default, + }) + return variable + + +# Substitute all Makefile targets whose names are Makefile variables by their final name. +# +# Example in Makefile: +# +# ${BEATNAME}: $(GOFILES_ALL) ## @build build the beat application +# go build +# +# BEATNAME is a Makefile target whose name ${BEATNAME} is a Makefile variable. 
+# The name of the rule is changed from "BEATNAME" to "filebeat" +# +def substitute_variable_targets(targets, variables): + target_variables = ([target for category in targets for target in targets[category] if target['is_variable']]) + for variable in target_variables: + variable['name'] = variables[variable['name']] + variable['variable'] = False + +# Display the help to stdout +def print_help(categories, categories_set): + column_size = max(len(rule["name"]) for category in categories_set for rule in categories[category]) + for category in categories_set: + print ("\n{}:".format(category)) + for rule in categories[category]: + if "name" in rule: + name = rule["name"] + if "varname" in rule: + name = rule["varname"] + default = rule["default"] + print ("\t{target: <{fill}}\t{doc}.{default}".format( + target=rule["name"], fill=column_size, + doc=rule["doc"], + default=(" Default: {}".format(default) if default else ""))) + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Generate documentation from a list of Makefile files") + + parser.add_argument( "--variables", dest='variables', + action='store_true') + + parser.add_argument( "files", nargs="+", type=argparse.FileType('r'), + help="list of Makefiles to analyze", + default=None) + args = parser.parse_args() + + categories_targets = {} + categories_vars = {} + categories_targets_set = [] + categories_vars_set = [] + variables = {} + + for file in args.files: + for line in file.readlines(): + parse_line(line, regexp_target_doc, categories_targets, categories_targets_set) + variable = parse_line(line, regexp_var_help, categories_vars, categories_vars_set) + if variable and variable[0] not in variables: + variables[variable[0]] = variable[1] + + substitute_variable_targets(categories_targets, variables) + + if not args.variables: + print ("Usage: make [target] [VARIABLE=value]") + print_help(categories_targets, categories_targets_set) + else: + print_help(categories_vars, categories_vars_set) + diff --git a/vendor/github.com/elastic/beats/libbeat/scripts/generate_template.py b/vendor/github.com/elastic/beats/libbeat/scripts/generate_template.py index cd81db75..2b6fb53e 100644 --- a/vendor/github.com/elastic/beats/libbeat/scripts/generate_template.py +++ b/vendor/github.com/elastic/beats/libbeat/scripts/generate_template.py @@ -46,7 +46,7 @@ def fields_to_es_template(args, input, output, index, version): "template": index, "order": 0, "settings": { - "index.refresh_interval": "5s" + "index.refresh_interval": "5s", }, "mappings": { "_default_": { @@ -66,6 +66,11 @@ def fields_to_es_template(args, input, output, index, version): template["mappings"]["_default_"]["_all"]["norms"] = { "enabled": False } + else: + # For ES 5.x, increase the limit on the max number of fields. + # In a typical scenario, most fields are not used, so increasing the + # limit shouldn't be that bad. 
+ template["settings"]["index.mapping.total_fields.limit"] = 10000 properties = {} dynamic_templates = [] @@ -205,6 +210,18 @@ def fill_field_properties(args, field, defaults, path): "ignore_above": 1024 } + elif field["type"] == "ip": + if args.es2x: + properties[field["name"]] = { + "type": "string", + "index": "not_analyzed", + "ignore_above": 1024 + } + else: + properties[field["name"]] = { + "type": "ip" + } + elif field["type"] in ["geo_point", "date", "long", "integer", "double", "float", "half_float", "scaled_float", "boolean"]: diff --git a/vendor/github.com/elastic/beats/libbeat/tests/files/template-es2x.json b/vendor/github.com/elastic/beats/libbeat/tests/files/template-es2x.json new file mode 100644 index 00000000..93059edd --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/tests/files/template-es2x.json @@ -0,0 +1,43 @@ +{ + "mappings": { + "_default_": { + "_all": { + "enabled": true, + "norms": { + "enabled": false + } + }, + "dynamic_templates": [ + { + "template1": { + "mapping": { + "doc_values": true, + "ignore_above": 1024, + "index": "not_analyzed", + "type": "{dynamic_type}" + }, + "match": "*" + } + } + ], + "properties": { + "@timestamp": { + "type": "date" + }, + "message": { + "type": "string", + "index": "analyzed" + }, + "offset": { + "type": "long", + "doc_values": "true" + } + } + } + }, + "settings": { + "index.refresh_interval": "5s" + }, + "template": "mockbeat-*" +} + diff --git a/vendor/github.com/elastic/beats/libbeat/tests/files/template.json b/vendor/github.com/elastic/beats/libbeat/tests/files/template.json index 027f9891..0082695e 100644 --- a/vendor/github.com/elastic/beats/libbeat/tests/files/template.json +++ b/vendor/github.com/elastic/beats/libbeat/tests/files/template.json @@ -25,7 +25,7 @@ "type": "date" }, "message": { - "type": "string", + "type": "text", "index": "analyzed" }, "offset": { diff --git a/vendor/github.com/elastic/beats/libbeat/tests/system/beat/beat.py b/vendor/github.com/elastic/beats/libbeat/tests/system/beat/beat.py index 66c3fced..6e3379a6 100644 --- a/vendor/github.com/elastic/beats/libbeat/tests/system/beat/beat.py +++ b/vendor/github.com/elastic/beats/libbeat/tests/system/beat/beat.py @@ -401,7 +401,7 @@ class TestCase(unittest.TestCase): dictfields.extend(subdictfields) else: fields.append(newName) - if field.get("type") == "dict": + if field.get("type") in ["dict", "geo_point"]: dictfields.append(newName) return fields, dictfields @@ -462,3 +462,13 @@ class TestCase(unittest.TestCase): return pred(len([1 for line in f])) except IOError: return False + + def get_elasticsearch_url(self): + """ + Returns an elasticsearch.Elasticsearch instance built from the + env variables like the integration tests. 
+ """ + return "http://{host}:{port}".format( + host=os.getenv("ES_HOST", "localhost"), + port=os.getenv("ES_PORT", "9200"), + ) diff --git a/vendor/github.com/elastic/beats/libbeat/tests/system/requirements.txt b/vendor/github.com/elastic/beats/libbeat/tests/system/requirements.txt index c8bebf04..d6596fa3 100644 --- a/vendor/github.com/elastic/beats/libbeat/tests/system/requirements.txt +++ b/vendor/github.com/elastic/beats/libbeat/tests/system/requirements.txt @@ -5,3 +5,4 @@ PyYAML nose-timer redis elasticsearch +requests diff --git a/vendor/github.com/elastic/beats/metricbeat/.gitignore b/vendor/github.com/elastic/beats/metricbeat/.gitignore index 5dcd9d08..32c8135a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/.gitignore +++ b/vendor/github.com/elastic/beats/metricbeat/.gitignore @@ -1,5 +1,7 @@ build +_meta/kibana /metricbeat /metricbeat.test -/docs/html_docs \ No newline at end of file +/docs/html_docs + diff --git a/vendor/github.com/elastic/beats/metricbeat/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/Dockerfile index 123bf642..ffcb7bb4 100644 --- a/vendor/github.com/elastic/beats/metricbeat/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/Dockerfile @@ -1,9 +1,10 @@ -FROM golang:1.7.1 +FROM golang:1.7.4 MAINTAINER Nicolas Ruflin RUN set -x && \ apt-get update && \ - apt-get install -y netcat python-virtualenv python-pip && \ + apt-get install -y --no-install-recommends \ + netcat python-pip virtualenv && \ apt-get clean # Setup work environment diff --git a/vendor/github.com/elastic/beats/metricbeat/_meta/beat.full.yml b/vendor/github.com/elastic/beats/metricbeat/_meta/beat.full.yml index 8bcb0c7e..294e79b7 100644 --- a/vendor/github.com/elastic/beats/metricbeat/_meta/beat.full.yml +++ b/vendor/github.com/elastic/beats/metricbeat/_meta/beat.full.yml @@ -39,6 +39,9 @@ metricbeat.modules: # Per process stats - process + + # Sockets and connection info (linux only) + #- socket enabled: true period: 10s processes: ['.*'] @@ -49,6 +52,11 @@ metricbeat.modules: # EXPERIMENTAL: cgroups can be enabled for the process metricset. #cgroups: false + # Configure reverse DNS lookup on remote IP addresses in the socket metricset. + #socket.reverse_lookup.enabled: false + #socket.reverse_lookup.success_ttl: 60s + #socket.reverse_lookup.failure_ttl: 60s + #------------------------------- Apache Module ------------------------------- #- module: apache #metricsets: ["status"] @@ -67,6 +75,13 @@ metricbeat.modules: # Password of hosts. 
Empty by default #password: test123 +#------------------------------ Couchbase Module ----------------------------- +#- module: couchbase + #metricsets: ["cluster", "node", "bucket"] + #enabled: true + #period: 10s + #hosts: ["localhost:8091"] + #------------------------------- Docker Module ------------------------------- #- module: docker #metricsets: ["cpu", "info", "memory", "network", "diskio", "container"] @@ -80,7 +95,7 @@ metricbeat.modules: #certificate: "/etc/pki/client/cert.pem" #key: "/etc/pki/client/cert.key" -#------------------------------- haproxy Module ------------------------------ +#------------------------------- HAProxy Module ------------------------------ #- module: haproxy #metricsets: ["info", "stat"] #enabled: true @@ -195,6 +210,15 @@ metricbeat.modules: #password: pass +#----------------------------- Prometheus Module ----------------------------- +#- module: prometheus + #metricsets: ["stats"] + #enabled: true + #period: 10s + #hosts: ["localhost:9090"] + #metrics_path: /metrics + #namespace: example + #-------------------------------- Redis Module ------------------------------- #- module: redis #metricsets: ["info", "keyspace"] @@ -204,9 +228,6 @@ metricbeat.modules: # Redis hosts #hosts: ["127.0.0.1:6379"] - # Enabled defines if the module is enabled. Default: true - #enabled: true - # Timeout after which time a metricset should return an error # Timeout is by default defined as period, as a fetch of a metricset # should never take longer then period, as otherwise calls can pile up. diff --git a/vendor/github.com/elastic/beats/metricbeat/_meta/beat.yml b/vendor/github.com/elastic/beats/metricbeat/_meta/beat.yml index 592c10be..6ab09882 100644 --- a/vendor/github.com/elastic/beats/metricbeat/_meta/beat.yml +++ b/vendor/github.com/elastic/beats/metricbeat/_meta/beat.yml @@ -39,6 +39,9 @@ metricbeat.modules: # Per process stats - process + + # Sockets (linux only) + #- socket enabled: true period: 10s processes: ['.*'] diff --git a/vendor/github.com/elastic/beats/metricbeat/_meta/fields.common.yml b/vendor/github.com/elastic/beats/metricbeat/_meta/fields.common.yml index a5a00c83..b47ccbd1 100644 --- a/vendor/github.com/elastic/beats/metricbeat/_meta/fields.common.yml +++ b/vendor/github.com/elastic/beats/metricbeat/_meta/fields.common.yml @@ -23,6 +23,11 @@ description: > Event round trip time in microseconds. + - name: metricset.namespace + type: keyword + description: > + Namespace of dynamic metricsets. 
+ - name: type required: true example: metricsets diff --git a/vendor/github.com/elastic/beats/metricbeat/beater/event.go b/vendor/github.com/elastic/beats/metricbeat/beater/event.go index 2716ceec..c1e9af8b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/beater/event.go +++ b/vendor/github.com/elastic/beats/metricbeat/beater/event.go @@ -56,18 +56,28 @@ func (b EventBuilder) Build() (common.MapStr, error) { delete(event, mb.ModuleData) } + metricsetData := common.MapStr{ + "module": b.ModuleName, + "name": b.MetricSetName, + "rtt": b.FetchDuration.Nanoseconds() / int64(time.Microsecond), + } + + namespace := b.MetricSetName + if n, ok := event["_namespace"]; ok { + delete(event, "_namespace") + namespace = n.(string) + // TODO: check if namespace does not already exist + metricsetData["namespace"] = namespace + } + event = common.MapStr{ "@timestamp": timestamp, "type": typeName, common.EventMetadataKey: b.metadata, b.ModuleName: common.MapStr{ - b.MetricSetName: event, - }, - "metricset": common.MapStr{ - "module": b.ModuleName, - "name": b.MetricSetName, - "rtt": b.FetchDuration.Nanoseconds() / int64(time.Microsecond), + namespace: event, }, + "metricset": metricsetData, } // In case meta data exists, it is added on the module level diff --git a/vendor/github.com/elastic/beats/metricbeat/docker-compose.yml b/vendor/github.com/elastic/beats/metricbeat/docker-compose.yml index 991fe07a..30340725 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docker-compose.yml +++ b/vendor/github.com/elastic/beats/metricbeat/docker-compose.yml @@ -2,19 +2,26 @@ version: '2' services: beat: build: ${PWD}/. - links: + depends_on: - apache + - couchbase - mongodb + - haproxy - kafka - mysql - nginx - postgresql + - prometheus - redis - zookeeper - - haproxy environment: - APACHE_HOST=apache - APACHE_PORT=80 + - COUCHBASE_HOST=couchbase + - COUCHBASE_PORT=8091 + - COUCHBASE_DSN=http://Administrator:password@couchbase:8091 + - HAPROXY_HOST=haproxy + - HAPROXY_PORT=14567 - KAFKA_HOST=kafka - KAFKA_PORT=9092 - NGINX_HOST=nginx @@ -30,10 +37,10 @@ services: - POSTGRESQL_HOST=postgresql - POSTGRESQL_PORT=5432 - POSTGRESQL_USERNAME=postgres + - PROMETHEUS_HOST=prometheus + - PROMETHEUS_PORT=9090 - ZOOKEEPER_HOST=zookeeper - ZOOKEEPER_PORT=2181 - - HAPROXY_HOST=haproxy - - HAPROXY_PORT=14567 - TEST_ENVIRONMENT=false working_dir: /go/src/github.com/elastic/beats/metricbeat volumes: @@ -44,14 +51,15 @@ services: # Overloading kibana with a simple image as it is not needed here kibana: - build: - context: ${PWD}/../testing/environments/ - dockerfile: Dockerfile + image: alpine:latest # Modules apache: build: ${PWD}/module/apache/_meta + couchbase: + build: ${PWD}/module/couchbase/_meta + kafka: image: spotify/kafka expose: @@ -77,10 +85,13 @@ services: postgresql: image: postgres:9.5.3 + prometheus: + image: prom/prometheus + expose: + - 9090 + redis: image: redis:3.2.4-alpine zookeeper: image: jplock/zookeeper:3.4.8 - - diff --git a/vendor/github.com/elastic/beats/metricbeat/docker-entrypoint.sh b/vendor/github.com/elastic/beats/metricbeat/docker-entrypoint.sh index a7ac5ec9..25087fc7 100755 --- a/vendor/github.com/elastic/beats/metricbeat/docker-entrypoint.sh +++ b/vendor/github.com/elastic/beats/metricbeat/docker-entrypoint.sh @@ -23,11 +23,13 @@ waitFor() { # Main waitFor ${APACHE_HOST} ${APACHE_PORT} Apache +waitFor ${COUCHBASE_HOST} ${COUCHBASE_PORT} Couchbase waitFor ${HAPROXY_HOST} ${HAPROXY_PORT} HAProxy waitFor ${KAFKA_HOST} ${KAFKA_PORT} Kafka waitFor ${MYSQL_HOST} ${MYSQL_PORT} MySQL 
waitFor ${NGINX_HOST} ${NGINX_PORT} Nginx waitFor ${POSTGRESQL_HOST} ${POSTGRESQL_PORT} Postgresql +waitFor ${PROMETHEUS_HOST} ${PROMETHEUS_PORT} Prometheus waitFor ${REDIS_HOST} ${REDIS_PORT} Redis waitFor ${ZOOKEEPER_HOST} ${ZOOKEEPER_PORT} Zookeeper exec "$@" diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/developer-guide/creating-beat-from-metricbeat.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/developer-guide/creating-beat-from-metricbeat.asciidoc new file mode 100644 index 00000000..87144731 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/developer-guide/creating-beat-from-metricbeat.asciidoc @@ -0,0 +1,100 @@ +[[creating-beat-from-metricbeat]] +== Creating a Beat based on Metricbeat + +The metricset Beat generator enables you to create a Beat that uses Metricbeat as a library and has your +own metricsets. + +[float] +=== Requirements + +To create your own Beat, you must have Golang {go-version} or later installed, and the `$GOPATH` +must be set up correctly. In addition, the following tools are required: + +* https://www.python.org/downloads/[python] +* https://virtualenv.pypa.io/en/stable/[virtualenv] +* https://github.com/audreyr/cookiecutter[cookiecutter] + +Virtualenv and Cookiecutter are most easily installed with your package manager or https://pip.pypa.io/en/stable/[pip]. For more details on how to +install cookiecutter, see the http://cookiecutter.readthedocs.io/en/latest/installation.html[cookiecutter installation docs]. + +[float] +=== Step 1 - Get the metricbeat source code + +The first step is to get the metricbeat source code: + +[source,bash] +---- +go get github.com/elastic/beats/metricbeat +---- + +This will clone the beats repository into `GOPATH`. By default `go get` fetches the master branch. To build your beat +on a specific version of libbeat, check out the specific branch ({doc-branch} in the example below): + +["source","sh",subs="attributes"] +---- +cd $GOPATH/src/github.com/elastic/beats +git checkout {doc-branch} +---- + + +Now change to the directory where you want to create the beat. +This directory is normally located under `$GOPATH/src/github.com/{your-github-name}`. Inside this directory, run the command to create the beat. + + +[float] +=== Step 2 - Create the Beat + +Run the command: + +[source,bash] +---- +cookiecutter $GOPATH/src/github.com/elastic/beats/generate/metricbeat/metricset +---- + +When prompted, enter the Beat name and path. + + +[float] +=== Step 3 - Init and create the metricset + +After creating the Beat, change the directory to `$GOPATH/src/github.com/{your-github-name}/{beat}` and run: + +[source,bash] +---- +make setup +---- + +This will do the initial setup for your Beat and also run `make create-metricset`, which will ask you for the +module name and metricset name of your Beat. + +For more details about creating a metricset, see the docs about https://www.elastic.co/guide/en/beats/metricbeat/current/creating-metricsets.html[creating a metricset]. + + +[float] +=== Step 4 - Build & Run + +To create a binary, run the `make` command. This will create the binary in your beats directory. + +To run it, execute the binary. This will automatically load the default configuration which was generated by `make update`. + +[source,bash] +---- +./beatname -e -d "*" +---- + +This will run the beat with debug output enabled to the console to directly see what is happening. Stop the beat with `CTRL-C`. 
+ +[float] +=== Step 5 - Package + +To create packages and binaries for different platforms, https://www.docker.com/[docker] is required. +The first step is to get the most recent packaging tools into your beat: + +[source,bash] +---- +make package +---- + +This will fetch the most recent packaging tools and start the packaging of your beat. This can take a while. + +For more details about creating a metricset, see the docs about https://www.elastic.co/guide/en/beats/metricbeat/current/creating-metricsets.html[creating a metricset]. diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/developer-guide/index.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/developer-guide/index.asciidoc index 0bc5315e..b2f3fa1b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/developer-guide/index.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/developer-guide/index.asciidoc @@ -32,13 +32,13 @@ https://github.com/elastic/beats/blob/master/CONTRIBUTING.md[general contributor [[metricbeat-dev-overview]] == Overview -Metricbeat consists of modules and metricsets. A Metricbeat module is typically +Metricbeat consists of modules and metricsets. A Metricbeat module is typically named after the service the metrics are fetched from, such as redis, mysql, and so on. Each module can contain multiple metricsets. A metricset represents multiple metrics that are normally retrieved with one request from the remote system. For example, the Redis `info` metricset retrieves info that you get when you run the Redis `INFO` command, and the MySQL `status` metricset retrieves -info that you get when you issue the MySQL `SHOW GLOBAL STATUS` query. +info that you get when you issue the MySQL `SHOW GLOBAL STATUS` query. [float] == Module and Metricsets Requirements @@ -58,13 +58,12 @@ directly from the service itself and not via a third-party tool. The goal is to have as few movable parts as possible and for Metricbeat to run as close as possible to the service that it needs to monitor. - include::./create-metricset.asciidoc[] include::./metricset-details.asciidoc[] include::./create-module.asciidoc[] -include::../../../generate/metricbeat/metricset/README.asciidoc[] +include::./creating-beat-from-metricbeat.asciidoc[] include::./faq.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc index d025cacf..0466dfed 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc @@ -16,6 +16,7 @@ grouped in the following categories: * <> * <> * <> +* <> * <> * <> * <> @@ -23,6 +24,7 @@ grouped in the following categories: * <> * <> * <> +* <> * <> * <> * <> @@ -495,6 +497,14 @@ required: True Event round trip time in microseconds. +[float] +=== metricset.namespace + +type: keyword + +Namespace of dynamic metricsets. + + [float] === type @@ -505,6 +515,466 @@ required: True The document type. Always set to "metricsets". +[[exported-fields-couchbase]] +== Couchbase Fields + +experimental[] +Metrics collected from Couchbase servers. + + + +[float] +== couchbase Fields + +`couchbase` contains the metrics that were scraped from Couchbase. + + + +[float] +== bucket Fields + +Couchbase bucket metrics. + + + +[float] +=== couchbase.bucket.name + +type: keyword + +Name of the bucket. + + +[float] +=== couchbase.bucket.type + +type: keyword + +Type of the bucket. 
+ + +[float] +=== couchbase.bucket.data.used.bytes + +type: long + +format: bytes + +Size of user data within buckets of the specified state that are resident in RAM. + + +[float] +=== couchbase.bucket.disk.fetches + +type: long + +Number of disk fetches. + + +[float] +=== couchbase.bucket.disk.used.bytes + +type: long + +format: bytes + +Amount of disk used (bytes). + + +[float] +=== couchbase.bucket.memory.used.bytes + +type: long + +format: bytes + +Amount of memory used by the bucket (bytes). + + +[float] +=== couchbase.bucket.quota.ram.bytes + +type: long + +format: bytes + +Amount of RAM used by the bucket (bytes). + + +[float] +=== couchbase.bucket.quota.use.pct + +type: scaled_float + +format: percent + +Percentage of RAM used (for active objects) against the configured bucket size (%). + + +[float] +=== couchbase.bucket.ops_per_sec + +type: long + +Number of operations per second. + + +[float] +=== couchbase.bucket.item_count + +type: long + +Number of items associated with the bucket. + + +[float] +== cluster Fields + +Couchbase cluster metrics. + + + +[float] +=== couchbase.cluster.hdd.free.bytes + +type: long + +format: bytes + +Free hard drive space in the cluster (bytes). + + +[float] +=== couchbase.cluster.hdd.quota.total.bytes + +type: long + +format: bytes + +Hard drive quota total for the cluster (bytes). + + +[float] +=== couchbase.cluster.hdd.total.bytes + +type: long + +format: bytes + +Total hard drive space available to the cluster (bytes). + + +[float] +=== couchbase.cluster.hdd.used.value.bytes + +type: long + +format: bytes + +Hard drive space used by the cluster (bytes). + + +[float] +=== couchbase.cluster.hdd.used.by_data.bytes + +type: long + +format: bytes + +Hard drive space used by the data in the cluster (bytes). + + +[float] +=== couchbase.cluster.max_bucket_count + +type: long + +Max bucket count setting. + + +[float] +=== couchbase.cluster.quota.index_memory.mb + +type: long + +Memory quota setting for the Index service (Mbyte). + + +[float] +=== couchbase.cluster.quota.memory.mb + +type: long + +Memory quota setting for the cluster (Mbyte). + + +[float] +=== couchbase.cluster.ram.quota.total.value.bytes + +type: long + +format: bytes + +RAM quota total for the cluster (bytes). + + +[float] +=== couchbase.cluster.ram.quota.total.per_node.bytes + +type: long + +format: bytes + +RAM quota used by the current node in the cluster (bytes). + + +[float] +=== couchbase.cluster.ram.quota.used.value.bytes + +type: long + +format: bytes + +RAM quota used by the cluster (bytes). + + +[float] +=== couchbase.cluster.ram.quota.used.per_node.bytes + +type: long + +format: bytes + +Ram quota used by the current node in the cluster (bytes) + + +[float] +=== couchbase.cluster.ram.total.bytes + +type: long + +format: bytes + +Total RAM available to cluster (bytes). + + +[float] +=== couchbase.cluster.ram.used.value.bytes + +type: long + +format: bytes + +RAM used by the cluster (bytes). + + +[float] +=== couchbase.cluster.ram.used.by_data.bytes + +type: long + +format: bytes + +RAM used by the data in the cluster (bytes). + + +[float] +== node Fields + +Couchbase node metrics. + + + +[float] +=== couchbase.node.cmd_get + +type: long + +Number of get commands + + +[float] +=== couchbase.node.couch.docs.actual_disk_size.bytes + +type: long + +format: bytes + +Amount of disk space used by Couch docs (bytes). + + +[float] +=== couchbase.node.couch.docs.data_size.bytes + +type: long + +format: bytes + +Data size of Couch docs associated with a node (bytes). 
+ + +[float] +=== couchbase.node.couch.spatial.data_size.bytes + +type: long + +Size of object data for spatial views (bytes). + + +[float] +=== couchbase.node.couch.spatial.disk_size.bytes + +type: long + +Amount of disk space used by spatial views (bytes). + + +[float] +=== couchbase.node.couch.views.actual_disk_size.bytes + +type: long + +Amount of disk space used by Couch views (bytes). + + +[float] +=== couchbase.node.couch.views.data_size.bytes + +type: long + +Size of object data for Couch views (bytes). + + +[float] +=== couchbase.node.cpu_utilization_rate.pct + +type: scaled_float + +The CPU utilization rate (%). + + +[float] +=== couchbase.node.current_items.value + +type: long + +Number of current items. + + +[float] +=== couchbase.node.current_items.total + +type: long + +Total number of items associated with the node. + + +[float] +=== couchbase.node.ep_bg_fetched + +type: long + +Number of disk fetches performed since the server was started. + + +[float] +=== couchbase.node.get_hits + +type: long + +Number of get hits. + + +[float] +=== couchbase.node.hostname + +type: keyword + +The hostname of the node. + + +[float] +=== couchbase.node.mcd_memory.allocated.bytes + +type: long + +format: bytes + +Amount of memcached memory allocated (bytes). + + +[float] +=== couchbase.node.mcd_memory.reserved.bytes + +type: long + +Amount of memcached memory reserved (bytes). + + +[float] +=== couchbase.node.memory.free.bytes + +type: long + +Amount of memory free for the node (bytes). + + +[float] +=== couchbase.node.memory.total.bytes + +type: long + +Total memory available to the node (bytes). + + +[float] +=== couchbase.node.memory.used.bytes + +type: long + +Memory used by the node (bytes). + + +[float] +=== couchbase.node.ops + +type: long + +Number of operations performed on Couchbase. + + +[float] +=== couchbase.node.swap.total.bytes + +type: long + +Total swap size allocated (bytes). + + +[float] +=== couchbase.node.swap.used.bytes + +type: long + +Amount of swap space used (bytes). + + +[float] +=== couchbase.node.uptime.sec + +type: long + +Time during which the node was in operation (sec). + + +[float] +=== couchbase.node.vb_replica_curr_items + +type: long + +Number of items/documents that are replicas. + + [[exported-fields-docker]] == Docker Fields @@ -944,24 +1414,24 @@ Total number of outgoing packets. [[exported-fields-haproxy]] -== haproxy Fields +== HAProxy Fields experimental[] -haproxy Module +HAProxy Module [float] == haproxy Fields -HAProx metrics. +HAProxy metrics. [float] == info Fields -General infomration collected on HAProxy process +General information about HAProxy processes. @@ -970,7 +1440,7 @@ General infomration collected on HAProxy process type: long -Number of processes +Number of processes. [float] @@ -978,7 +1448,7 @@ Number of processes type: long -Process number +Process number. [float] @@ -986,7 +1456,7 @@ Process number type: long -Process ID +Process ID. [float] @@ -1008,7 +1478,7 @@ type: long type: long -Current uptime in seconds +Current uptime in seconds. [float] @@ -1018,7 +1488,7 @@ type: long format: bytes -Max number of memory usage in bytes (The 'Memmax_MB' value converted to bytes) +Maximum amount of memory usage in bytes (the 'Memmax_MB' value converted to bytes). [float] @@ -1026,7 +1496,7 @@ Max number of memory usage in bytes (The 'Memmax_MB' value converted to bytes) type: long -Max number of open files for process +Maximum number of open files for the process. 
[float] @@ -1100,7 +1570,7 @@ type: long type: long -Current Connections +Current connections. [float] @@ -1108,7 +1578,7 @@ Current Connections type: long -Total Connections +Total connections. [float] @@ -1116,7 +1586,7 @@ Total Connections type: long -Current ssl Connections +Current SSL connections. [float] @@ -1124,7 +1594,7 @@ Current ssl Connections type: long -Current Connections +Total SSL connections. [float] @@ -1132,7 +1602,7 @@ Current Connections type: long -Current Connections +Maximum SSL connections. [float] @@ -1140,7 +1610,7 @@ Current Connections type: long -Max connections +Maximum connections. [float] @@ -1347,7 +1817,7 @@ format: percent [float] == stat Fields -Stats collected from HAProxy process +Stats collected from HAProxy processes. @@ -1356,7 +1826,7 @@ Stats collected from HAProxy process type: keyword -status (UP/DOWN/NOLB/MAINT/MAINT(via)...) +Status (UP, DOWN, NOLB, MAINT, or MAINT(via)...). [float] @@ -1364,7 +1834,7 @@ status (UP/DOWN/NOLB/MAINT/MAINT(via)...) type: long -total weight (backend), server weight (server) +Total weight (for backends), or server weight (for servers). [float] @@ -1372,7 +1842,7 @@ total weight (backend), server weight (server) type: long -total downtime (in seconds). The value for the backend is the downtime for the whole backend, not the sum of the server downtime. +Total downtime (in seconds). For backends, this value is the downtime for the whole backend, not the sum of the downtime for the servers. [float] @@ -1380,7 +1850,7 @@ total downtime (in seconds). The value for the backend is the downtime for the w type: integer -(0=frontend, 1=backend, 2=server, 3=socket/listener) +Component type (0=frontend, 1=backend, 2=server, or 3=socket/listener). [float] @@ -1388,7 +1858,7 @@ type: integer type: integer -process id (0 for first instance, 1 for second, ...) +Process ID (0 for first instance, 1 for second, and so on). [float] @@ -1396,7 +1866,7 @@ process id (0 for first instance, 1 for second, ...) type: keyword -service name (FRONTEND for frontend, BACKEND for backend, any name for server/listener) +Service name (FRONTEND for frontend, BACKEND for backend, or any name for server/listener). [float] @@ -1406,7 +1876,7 @@ type: long format: bytes -bytes in +Bytes in. [float] @@ -1416,7 +1886,7 @@ type: long format: bytes -bytes out +Bytes out. [float] @@ -1424,7 +1894,7 @@ bytes out type: integer -number of seconds since the last UP<->DOWN transition +Number of seconds since the last UP->DOWN or DOWN->UP transition. [float] @@ -1434,7 +1904,7 @@ type: scaled_float format: percentage -current throttle percentage for the server, when slowstart is active, or no value if not in slowstart. +Current throttle percentage for the server when slowstart is active, or no value if slowstart is inactive. [float] @@ -1442,7 +1912,7 @@ current throttle percentage for the server, when slowstart is active, or no valu type: long -total number of times a server was selected, either for new sessions, or when re-dispatching. The server counter is the number of times that server was selected. +Total number of times a server was selected, either for new sessions, or when re-dispatching. For servers, this field reports the the number of times the server was selected. [float] @@ -1450,7 +1920,7 @@ total number of times a server was selected, either for new sessions, or when re type: long -id of proxy/server if tracking is enabled. +ID of the proxy/server if tracking is enabled. @@ -1459,7 +1929,7 @@ id of proxy/server if tracking is enabled. 
type: long -cumulative number of connections +Cumulative number of connections. [float] @@ -1467,7 +1937,7 @@ cumulative number of connections type: long -number of times a connection to a server was retried. +Number of times a connection to a server was retried. [float] @@ -1475,7 +1945,7 @@ number of times a connection to a server was retried. type: long -the average connect time in ms over the 1024 last requests +Average connect time in ms over the last 1024 requests. @@ -1484,9 +1954,10 @@ the average connect time in ms over the 1024 last requests type: long -requests denied because of security concerns. - * For tcp this is because of a matched tcp-request content rule. - * For http this is because of a matched http-request or tarpit rule. +Requests denied because of security concerns. + + * For TCP this is because of a matched tcp-request content rule. + * For HTTP this is because of a matched http-request or tarpit rule. [float] @@ -1494,7 +1965,7 @@ requests denied because of security concerns. type: long -current queued requests. For the backend this reports the number queued without a server assigned +Current queued requests. For backends, this field reports the number of requests queued without a server assigned. [float] @@ -1502,7 +1973,7 @@ current queued requests. For the backend this reports the number queued without type: long -max value of queued.current +Maximum value of queued.current. [float] @@ -1510,8 +1981,9 @@ max value of queued.current type: long -request errors. Some of the possible causes are: - * early termination from the client, before the request has been sent. +Request errors. Some of the possible causes are: + + * early termination from the client, before the request has been sent * read error from the client * client timeout * client closed connection @@ -1524,7 +1996,7 @@ request errors. Some of the possible causes are: type: long -number of times a request was redispatched to another server. The server value counts the number of times that server was switched away from. +Number of times a request was redispatched to another server. For servers, this field reports the number of times the server was switched away from. [float] @@ -1532,7 +2004,7 @@ number of times a request was redispatched to another server. The server value c type: long -number of requests that encountered an error trying to connect to a backend server. The backend stat is the sum of the stat for all servers of that backend, plus any connection errors not associated with a particular server (such as the backend having no active servers). +Number of requests that encountered an error trying to connect to a server. For backends, this field reports the sum of the stat for all backend servers, plus any connection errors not associated with a particular server (such as the backend having no active servers). [float] @@ -1546,7 +2018,7 @@ number of requests that encountered an error trying to connect to a backend serv type: long -HTTP requests per second over last elapsed second +Number of HTTP requests per second over the last elapsed second. [float] @@ -1554,7 +2026,7 @@ HTTP requests per second over last elapsed second type: long -max number of HTTP requests per second observed +Maximum number of HTTP requests per second. [float] @@ -1562,7 +2034,7 @@ max number of HTTP requests per second observed type: long -total number of HTTP requests received +Total number of HTTP requests received. @@ -1571,7 +2043,8 @@ total number of HTTP requests received type: long -response errors. 
srv_abrt will be counted here also. Some other errors are: * write error on the client socket (won't be counted for the server stat) * failure applying filters to the respons +Number of response errors. This value includes the number of data transfers aborted by the server (haproxy.stat.server.aborted). Some other errors are: +* write errors on the client socket (won't be counted for the server stat) * failure applying filters to the response [float] @@ -1579,7 +2052,7 @@ response errors. srv_abrt will be counted here also. Some other errors are: * wr type: long -the average response time in ms over the 1024 last requests (0 for TCP) +Average response time in ms over the last 1024 requests (0 for TCP). [float] @@ -1587,8 +2060,7 @@ the average response time in ms over the 1024 last requests (0 for TCP) type: integer -responses denied because of security concerns. - * For http this is because of a matched http-request rule, or "option checkcache". +Responses denied because of security concerns. For HTTP this is because of a matched http-request rule, or "option checkcache". [float] @@ -1602,7 +2074,7 @@ responses denied because of security concerns. type: long -http responses with 1xx code +HTTP responses with 1xx code. [float] @@ -1610,7 +2082,7 @@ http responses with 1xx code type: long -http responses with 2xx code +HTTP responses with 2xx code. [float] @@ -1618,7 +2090,7 @@ http responses with 2xx code type: long -http responses with 3xx code +HTTP responses with 3xx code. [float] @@ -1626,7 +2098,7 @@ http responses with 3xx code type: long -http responses with 4xx code +HTTP responses with 4xx code. [float] @@ -1634,7 +2106,7 @@ http responses with 4xx code type: long -http responses with 5xx code +HTTP responses with 5xx code. [float] @@ -1642,7 +2114,7 @@ http responses with 5xx code type: long -http responses with other codes (protocol error) +HTTP responses with other codes (protocol error). @@ -1651,7 +2123,7 @@ http responses with other codes (protocol error) type: long -current sessions +Number of current sessions. [float] @@ -1659,7 +2131,7 @@ current sessions type: long -max sessions +Maximum number of sessions. [float] @@ -1667,7 +2139,7 @@ max sessions type: long -configured session limit +Configured session limit. @@ -1676,7 +2148,7 @@ configured session limit type: integer -number of sessions per second over last elapsed second +Number of sessions per second over the last elapsed second. [float] @@ -1684,7 +2156,7 @@ number of sessions per second over last elapsed second type: integer -configured limit on new sessions per second +Configured limit on new sessions per second. [float] @@ -1692,7 +2164,7 @@ configured limit on new sessions per second type: integer -max number of new sessions per second +Maximum number of new sessions per second. [float] @@ -1706,7 +2178,8 @@ max number of new sessions per second type: keyword -status of last health check, one of: +Status of the last health check. One of: + UNK -> unknown INI -> initializing SOCKERR -> socket error @@ -1730,7 +2203,7 @@ status of last health check, one of: type: long -layer5-7 code, if available +Layer 5-7 code, if available. [float] @@ -1738,7 +2211,7 @@ layer5-7 code, if available type: long -time in ms took to finish last health check +Time in ms that it took to finish the last health check. [float] @@ -1767,7 +2240,7 @@ type: integer type: long -number of failed checks. (Only counts checks failed when the server is up.) +Number of checks that failed while the server was up. 
[float] @@ -1775,7 +2248,7 @@ number of failed checks. (Only counts checks failed when the server is up.) type: long -number of UP->DOWN transitions. The backend counter counts transitions to the whole backend being down, rather than the sum of the counters for each server. +Number of UP->DOWN transitions. For backends, this value is the number of transitions to the whole backend being down, rather than the sum of the transitions for each server. [float] @@ -1783,7 +2256,7 @@ number of UP->DOWN transitions. The backend counter counts transitions to the wh type: integer -number of data transfers aborted by the client +Number of data transfers aborted by the client. [float] @@ -1797,7 +2270,7 @@ number of data transfers aborted by the client type: integer -server id (unique inside a proxy) +Server ID (unique inside a proxy). [float] @@ -1805,7 +2278,7 @@ server id (unique inside a proxy) type: integer -number of data transfers aborted by the server (inc. in eresp) +Number of data transfers aborted by the server. This value is included in haproxy.stat.response.errors. [float] @@ -1813,7 +2286,7 @@ number of data transfers aborted by the server (inc. in eresp) type: integer -number of active servers (backend), server is active (server) +Number of backend servers that are active, meaning that they are healthy and can receive requests from the load balancer. [float] @@ -1821,7 +2294,7 @@ number of active servers (backend), server is active (server) type: integer -number of backup servers (backend), server is backup (server) +Number of backend servers that are backup servers. [float] @@ -1837,7 +2310,7 @@ type: long format: bytes -number of HTTP response bytes fed to the compressor +Number of HTTP response bytes fed to the compressor. [float] @@ -1847,7 +2320,7 @@ type: integer format: bytes -number of HTTP response bytes emitted by the compressor +Number of HTTP response bytes emitted by the compressor. [float] @@ -1857,7 +2330,7 @@ type: long format: bytes -number of bytes that bypassed the HTTP compressor (CPU/BW limit) +Number of bytes that bypassed the HTTP compressor (CPU/BW limit). [float] @@ -1867,7 +2340,7 @@ type: long format: bytes -number of HTTP responses that were compressed +Number of HTTP responses that were compressed. [float] @@ -1881,7 +2354,7 @@ number of HTTP responses that were compressed type: integer -unique proxy id +Unique proxy ID. [float] @@ -1889,7 +2362,7 @@ unique proxy id type: keyword -proxy name +Proxy name. [float] @@ -1903,7 +2376,7 @@ proxy name type: integer -configured maxqueue for the server, or nothing in the value is 0 (default, meaning no limit) +Configured queue limit (maxqueue) for the server, or nothing if the value of maxqueue is 0 (meaning no limit). [float] @@ -1911,7 +2384,7 @@ configured maxqueue for the server, or nothing in the value is 0 (default, meani type: integer -the average queue time in ms over the 1024 last requests +The average queue time in ms over the last 1024 requests. [[exported-fields-kafka]] @@ -1928,6 +2401,107 @@ experimental[] +[float] +== consumergroup Fields + +consumergroup + + + +[float] +== broker Fields + +Broker Consumer Group Information have been read from (Broker handling the consumer group). 
+ + + +[float] +=== kafka.consumergroup.broker.id + +type: long + +Broker id + + +[float] +=== kafka.consumergroup.broker.address + +type: keyword + +Broker address + + +[float] +=== kafka.consumergroup.id + +type: keyword + +Consumer Group ID + +[float] +=== kafka.consumergroup.topic + +type: keyword + +Topic name + +[float] +=== kafka.consumergroup.partition + +type: long + +Partition ID + +[float] +=== kafka.consumergroup.offset + +type: long + +consumer offset into partition being read + +[float] +=== kafka.consumergroup.meta + +type: text + +custom consumer meta data string + +[float] +=== kafka.consumergroup.error.code + +type: long + +kafka consumer/partition error code. + + +[float] +== client Fields + +Assigned client reading events from partition + + + +[float] +=== kafka.consumergroup.client.id + +type: keyword + +Client ID (kafka setting client.id) + +[float] +=== kafka.consumergroup.client.host + +type: keyword + +Client host + +[float] +=== kafka.consumergroup.client.member_id + +type: keyword + +internal consumer group member ID + [float] == partition Fields @@ -2990,6 +3564,40 @@ type: long +[float] +== command Fields + + + + +[float] +=== mysql.status.command.delete + +type: long + + + +[float] +=== mysql.status.command.insert + +type: long + + + +[float] +=== mysql.status.command.select + +type: long + + + +[float] +=== mysql.status.command.update + +type: long + + + [[exported-fields-nginx]] == Nginx Fields @@ -3492,6 +4100,66 @@ type: date Time at which these statistics were last reset. +[[exported-fields-prometheus]] +== Prometheus Fields + +beta[] +Stats collected from Prometheus. + + + +[float] +== prometheus Fields + + + + +[float] +== stats Fields + +Stats about the Prometheus server. + + + +[float] +== notifications Fields + +Notification stats. + + + +[float] +=== prometheus.stats.notifications.queue_length + +type: long + +Current queue length. + + +[float] +=== prometheus.stats.notifications.dropped + +type: long + +Number of dropped queue events. + + +[float] +=== prometheus.stats.processes.open_fds + +type: long + +Number of open file descriptors. + + +[float] +=== prometheus.stats.storage.chunks_to_persist + +type: long + +Number of memory chunks that are not yet persisted to disk. + + [[exported-fields-redis]] == Redis Fields @@ -4310,6 +4978,14 @@ The amount of CPU time spent in involuntary wait by the virtual CPU while the hy +[float] +=== system.cpu.cores + +type: long + +The number of CPU cores. + + [float] === system.cpu.user.pct @@ -5665,6 +6341,148 @@ type: long Total number of I/O operations performed on all devices by processes in the cgroup as seen by the throttling policy. +[float] +== socket Fields + +TCP sockets that are active. + + + +[float] +=== system.socket.direction + +type: keyword + +example: incoming + +How the socket was initiated. Possible values are incoming, outgoing, or listening. + + +[float] +=== system.socket.family + +type: keyword + +example: ipv4 + +Address family. + + +[float] +=== system.socket.local.ip + +type: ip + +example: 192.0.2.1 or 2001:0DB8:ABED:8536::1 + +Local IP address. This can be an IPv4 or IPv6 address. + + +[float] +=== system.socket.local.port + +type: long + +example: 22 + +Local port. + + +[float] +=== system.socket.remote.ip + +type: ip + +example: 192.0.2.1 or 2001:0DB8:ABED:8536::1 + +Remote IP address. This can be an IPv4 or IPv6 address. + + +[float] +=== system.socket.remote.port + +type: long + +example: 22 + +Remote port. 
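On Linux, the local and remote address fields above are ultimately derived from the kernel's socket tables. The following standalone Go sketch is an illustration only, not how the socket metricset is actually implemented: it parses the hex-encoded local and remote address:port columns of `/proc/net/tcp` (IPv4 only) into values of the kind shown in the examples above.

[source,go]
----
package main

import (
	"bufio"
	"encoding/hex"
	"fmt"
	"net"
	"os"
	"strconv"
	"strings"
)

// parseAddr decodes one hex "ADDR:PORT" column from /proc/net/tcp.
// IPv4 addresses are stored as little-endian hex, so the bytes are reversed.
func parseAddr(s string) (net.IP, uint64, error) {
	parts := strings.Split(s, ":")
	if len(parts) != 2 {
		return nil, 0, fmt.Errorf("unexpected address %q", s)
	}
	raw, err := hex.DecodeString(parts[0])
	if err != nil {
		return nil, 0, err
	}
	for i, j := 0, len(raw)-1; i < j; i, j = i+1, j-1 {
		raw[i], raw[j] = raw[j], raw[i]
	}
	port, err := strconv.ParseUint(parts[1], 16, 16)
	return net.IP(raw), port, err
}

func main() {
	f, err := os.Open("/proc/net/tcp")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	sc.Scan() // skip the header line
	for sc.Scan() {
		fields := strings.Fields(sc.Text())
		if len(fields) < 3 {
			continue
		}
		lip, lport, _ := parseAddr(fields[1]) // local_address column
		rip, rport, _ := parseAddr(fields[2]) // rem_address column
		fmt.Printf("local=%s:%d remote=%s:%d\n", lip, lport, rip, rport)
	}
}
----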
+ + +[float] +=== system.socket.remote.host + +type: keyword + +example: 76-211-117-36.nw.example.com. + +PTR record associated with the remote IP. It is obtained via reverse IP lookup. + + +[float] +=== system.socket.remote.etld_plus_one + +type: keyword + +example: example.com. + +The effective top-level domain (eTLD) of the remote host plus one more label. For example, the eTLD+1 for "foo.bar.golang.org." is "golang.org.". The data for determining the eTLD comes from an embedded copy of the data from http://publicsuffix.org. + + +[float] +=== system.socket.remote.host_error + +type: keyword + +Error describing the cause of the reverse lookup failure. + + +[float] +=== system.socket.process.pid + +type: long + +ID of the process that opened the socket. + + +[float] +=== system.socket.process.command + +type: keyword + +Name of the command (limited to 20 chars by the OS). + + +[float] +=== system.socket.process.cmdline + +type: keyword + + + +[float] +=== system.socket.process.exe + +type: keyword + +Absolute path to the executable. + + +[float] +=== system.socket.user.id + +type: long + +UID of the user running the process. + + +[float] +=== system.socket.user.name + +type: keyword + +Name of the user running the process. + + [[exported-fields-zookeeper]] == ZooKeeper Fields diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/gettingstarted.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/gettingstarted.asciidoc index cc136f53..6e17469b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/gettingstarted.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/gettingstarted.asciidoc @@ -41,7 +41,7 @@ mac>> for OS X, and <> for Windows). [NOTE] ================================================== If you use Apt or Yum, you can -{libbeat}/setup-repositories.html[install Metricbeat from our repositories] to +<> to update to the newest version more easily. See our https://www.elastic.co/downloads/beats/metricbeat[download page] for @@ -112,7 +112,9 @@ For more information about these options, see To configure Metricbeat, you edit the configuration file. For rpm and deb, you'll find the configuration file at `/etc/metricbeat/metricbeat.yml`. For mac -and win, look in the archive that you just extracted. +and win, look in the archive that you just extracted. There’s also a full +example configuration file called `metricbeat.full.yml` that shows all +non-deprecated options. Metricbeat uses <> to collect metrics. You configure each module individually. The following example shows the default configuration @@ -190,7 +192,11 @@ include::../../libbeat/docs/shared-template-load.asciidoc[] [[metricbeat-starting]] === Step 4: Starting Metricbeat -Run Metricbeat by issuing the following command: +Run Metricbeat by issuing the appropriate command for your platform. + +NOTE: If you use an init.d script to start Metricbeat on deb or rpm, you can't +specify command line flags (see <>). To specify flags, +start Metricbeat in the foreground. *deb:* diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/how-metricbeat-works.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/how-metricbeat-works.asciidoc index ff16af4b..d52c30cc 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/how-metricbeat-works.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/how-metricbeat-works.asciidoc @@ -158,8 +158,6 @@ Elasticsearch, and it can optimize storage. Basic meta information about each metric (such as the host) is also sent as part of each event. 
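To make that concrete, here is a hypothetical sketch (the metric values are invented; the field names follow the prometheus.stats fields documented earlier in this diff) of how a metricset's fetched values and the per-event meta information combine into a single event, using the same `common.MapStr` type the metricsets in this diff use:

[source,go]
----
package main

import (
	"fmt"

	"github.com/elastic/beats/libbeat/common"
)

func main() {
	// What a metricset returns: only the metric values themselves.
	metrics := common.MapStr{
		"notifications": common.MapStr{
			"queue_length": 0,
			"dropped":      0,
		},
		"processes": common.MapStr{"open_fds": 42},
	}

	// What ends up in the output: Metricbeat wraps the metrics with
	// per-event meta information such as module, metricset name, and host.
	event := common.MapStr{
		"metricset": common.MapStr{
			"module": "prometheus",
			"name":   "stats",
			"host":   "localhost:9090",
			"rtt":    115,
		},
		"prometheus": common.MapStr{"stats": metrics},
	}
	fmt.Println(event.StringToPrint())
}
----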
-//QUESTION: I didn't mention the ability to provide additional information, like unit, because it sounds like that's no implemented, right? - [[multiple-events-in-one]] ==== Multiple Metrics in One Event diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/images/kibana-created-indexes.png b/vendor/github.com/elastic/beats/metricbeat/docs/images/kibana-created-indexes.png index a4216300..7035e590 100644 Binary files a/vendor/github.com/elastic/beats/metricbeat/docs/images/kibana-created-indexes.png and b/vendor/github.com/elastic/beats/metricbeat/docs/images/kibana-created-indexes.png differ diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/images/kibana-navigation-vis.png b/vendor/github.com/elastic/beats/metricbeat/docs/images/kibana-navigation-vis.png index 5dc39464..df4dcd5e 100644 Binary files a/vendor/github.com/elastic/beats/metricbeat/docs/images/kibana-navigation-vis.png and b/vendor/github.com/elastic/beats/metricbeat/docs/images/kibana-navigation-vis.png differ diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/index.asciidoc index 23bec32e..536a76b0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/index.asciidoc @@ -1,11 +1,11 @@ = Metricbeat Reference -include::./version.asciidoc[] +include::../../libbeat/docs/version.asciidoc[] :libbeat: http://www.elastic.co/guide/en/beats/libbeat/{doc-branch} :filebeat: http://www.elastic.co/guide/en/beats/filebeat/{doc-branch} :elasticsearch: https://www.elastic.co/guide/en/elasticsearch/reference/{doc-branch} -:securitydoc: https://www.elastic.co/guide/en/x-pack/5.0 +:securitydoc: https://www.elastic.co/guide/en/x-pack/5.2 :version: {stack-version} :beatname_lc: metricbeat :beatname_uc: Metricbeat @@ -19,6 +19,8 @@ include::./command-line.asciidoc[] include::../../libbeat/docs/shared-directory-layout.asciidoc[] +include::../../libbeat/docs/repositories.asciidoc[] + include::./upgrading.asciidoc[] include::./how-metricbeat-works.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules.asciidoc index ea572984..a4c3e798 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules.asciidoc @@ -7,6 +7,8 @@ This section contains detailed information about the metric collecting modules contained in {beatname_uc}. Each module contains one or multiple metricsets. More details about each module can be found under the links below. +//pass macro block used here to remove Edit links from modules documentation because it is generated +pass::[] include::modules_list.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/couchbase.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/couchbase.asciidoc new file mode 100644 index 00000000..38ba58ac --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/couchbase.asciidoc @@ -0,0 +1,46 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-module-couchbase]] +== Couchbase Module + +experimental[] + +This module periodically fetches metrics from https://www.couchbase.com/[Couchbase] +servers. + + +[float] +=== Example Configuration + +The Couchbase module supports the standard configuration options that are described +in <>. 
Here is an example configuration: + +[source,yaml] +---- +metricbeat.modules: +#- module: couchbase + #metricsets: ["cluster", "node", "bucket"] + #enabled: true + #period: 10s + #hosts: ["localhost:8091"] +---- + +[float] +=== Metricsets + +The following metricsets are available: + +* <> + +* <> + +* <> + +include::couchbase/bucket.asciidoc[] + +include::couchbase/cluster.asciidoc[] + +include::couchbase/node.asciidoc[] + diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/couchbase/bucket.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/couchbase/bucket.asciidoc new file mode 100644 index 00000000..4c863cfe --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/couchbase/bucket.asciidoc @@ -0,0 +1,19 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-metricset-couchbase-bucket]] +include::../../../module/couchbase/bucket/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../module/couchbase/bucket/_meta/data.json[] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/couchbase/cluster.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/couchbase/cluster.asciidoc new file mode 100644 index 00000000..d311674a --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/couchbase/cluster.asciidoc @@ -0,0 +1,19 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-metricset-couchbase-cluster]] +include::../../../module/couchbase/cluster/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../module/couchbase/cluster/_meta/data.json[] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/couchbase/node.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/couchbase/node.asciidoc new file mode 100644 index 00000000..c5f1540a --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/couchbase/node.asciidoc @@ -0,0 +1,19 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-metricset-couchbase-node]] +include::../../../module/couchbase/node/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../module/couchbase/node/_meta/data.json[] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/haproxy.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/haproxy.asciidoc index 87add046..14fa204d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/haproxy.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/haproxy.asciidoc @@ -5,13 +5,15 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-module-haproxy]] == HAProxy Module -This is the haproxy Module. To enable stats collection from HAProxy, you must enable the stats socket via TCP. -For example, placing the following statement under the `global` or `default` section of the haproxy config: +This module collects stats from http://www.haproxy.org/[HAProxy]. To configure +HAProxy to collect stats, you must enable the stats socket via TCP. 
For example, +to enable stats reporting via any local IP on port 14567, place this statement +under the `global` or `default` section of the haproxy config: `stats socket 127.0.0.1:14567` -will enable stats reporting via any local IP on port 14567. Please note that you should probably use an internal private IP -or secure this with a firewall rule so that only designated hosts can access this data. +NOTE: You should use an internal private IP, or secure this with a firewall +rule, so that only designated hosts can access this data. [float] === Compatibility @@ -22,7 +24,7 @@ The HAProxy metricsets were tested with HAProxy 1.6 and are expected to work wit [float] === Example Configuration -The haproxy module supports the standard configuration options that are described +The HAProxy module supports the standard configuration options that are described in <>. Here is an example configuration: [source,yaml] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka.asciidoc index a8f68041..e606f00b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka.asciidoc @@ -5,6 +5,8 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-module-kafka]] == kafka Module +experimental[] + This is the kafka Module. @@ -51,7 +53,11 @@ metricbeat.modules: The following metricsets are available: +* <> + * <> +include::kafka/consumergroup.asciidoc[] + include::kafka/partition.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka/consumergroup.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka/consumergroup.asciidoc new file mode 100644 index 00000000..5f56fcb5 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka/consumergroup.asciidoc @@ -0,0 +1,19 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-metricset-kafka-consumergroup]] +include::../../../module/kafka/consumergroup/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../module/kafka/consumergroup/_meta/data.json[] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/prometheus.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/prometheus.asciidoc new file mode 100644 index 00000000..d79b9c81 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/prometheus.asciidoc @@ -0,0 +1,44 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-module-prometheus]] +== Prometheus Module + +beta[] + +This module periodically fetches metrics from +https://prometheus.io/docs/[Prometheus]. + + +[float] +=== Example Configuration + +The Prometheus module supports the standard configuration options that are described +in <>. 
Here is an example configuration: + +[source,yaml] +---- +metricbeat.modules: +#- module: prometheus + #metricsets: ["stats"] + #enabled: true + #period: 10s + #hosts: ["localhost:9090"] + #metrics_path: /metrics + #namespace: example +---- + +[float] +=== Metricsets + +The following metricsets are available: + +* <> + +* <> + +include::prometheus/collector.asciidoc[] + +include::prometheus/stats.asciidoc[] + diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/prometheus/collector.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/prometheus/collector.asciidoc new file mode 100644 index 00000000..9b1541cf --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/prometheus/collector.asciidoc @@ -0,0 +1,19 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-metricset-prometheus-collector]] +include::../../../module/prometheus/collector/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../module/prometheus/collector/_meta/data.json[] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/prometheus/stats.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/prometheus/stats.asciidoc new file mode 100644 index 00000000..10aa159c --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/prometheus/stats.asciidoc @@ -0,0 +1,19 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-metricset-prometheus-stats]] +include::../../../module/prometheus/stats/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../module/prometheus/stats/_meta/data.json[] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/system.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/system.asciidoc index 2f9b19bf..d1faf62c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/system.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/system.asciidoc @@ -48,6 +48,9 @@ metricbeat.modules: cpu_ticks: true ---- +It is strongly recommended to not run docker metricsets with a period smaller then 3 seconds. The request to the docker +API already takes up to 2s seconds. Otherwise all the requests would timeout and no data is reported. + [float] === Dashboard @@ -93,6 +96,9 @@ metricbeat.modules: # Per process stats - process + + # Sockets (linux only) + #- socket enabled: true period: 10s processes: ['.*'] @@ -121,6 +127,8 @@ The following metricsets are available: * <> +* <> + include::system/core.asciidoc[] include::system/cpu.asciidoc[] @@ -139,3 +147,5 @@ include::system/network.asciidoc[] include::system/process.asciidoc[] +include::system/socket.asciidoc[] + diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/system/socket.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/system/socket.asciidoc new file mode 100644 index 00000000..026f9f0c --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/system/socket.asciidoc @@ -0,0 +1,19 @@ +//// +This file is generated! 
See scripts/docs_collector.py +//// + +[[metricbeat-metricset-system-socket]] +include::../../../module/system/socket/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../module/system/socket/_meta/data.json[] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules_list.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules_list.asciidoc index 3229b250..05e489a1 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules_list.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules_list.asciidoc @@ -3,13 +3,15 @@ This file is generated! See scripts/docs_collector.py //// * <> + * <> * <> - * <> + * <> * <> * <> * <> * <> * <> + * <> * <> * <> * <> @@ -18,6 +20,7 @@ This file is generated! See scripts/docs_collector.py -- include::modules/apache.asciidoc[] +include::modules/couchbase.asciidoc[] include::modules/docker.asciidoc[] include::modules/haproxy.asciidoc[] include::modules/kafka.asciidoc[] @@ -25,6 +28,7 @@ include::modules/mongodb.asciidoc[] include::modules/mysql.asciidoc[] include::modules/nginx.asciidoc[] include::modules/postgresql.asciidoc[] +include::modules/prometheus.asciidoc[] include::modules/redis.asciidoc[] include::modules/system.asciidoc[] include::modules/zookeeper.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/securing-metricbeat.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/securing-metricbeat.asciidoc index f0de9806..8c92870c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/securing-metricbeat.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/securing-metricbeat.asciidoc @@ -10,12 +10,16 @@ The following topics describe how to secure communication between * <> * <> +//sets block macro for https.asciidoc included in next section + -- [[securing-communication-elasticsearch]] == Securing Communication With Elasticsearch include::../../libbeat/docs/https.asciidoc[] +//sets block macro for shared-ssl-logstash-config.asciidoc included in next section + [[configuring-ssl-logstash]] == Securing Communication With Logstash by Using SSL include::../../libbeat/docs/shared-ssl-logstash-config.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/troubleshooting.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/troubleshooting.asciidoc index c8d694e6..8c578683 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/troubleshooting.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/troubleshooting.asciidoc @@ -9,6 +9,8 @@ If you have issues installing or running {beatname_uc}, read the following tips: * <> * <> +//sets block macro for getting-help.asciidoc included in next section + -- [[getting-help]] @@ -16,6 +18,8 @@ If you have issues installing or running {beatname_uc}, read the following tips: include::../../libbeat/docs/getting-help.asciidoc[] +//sets block macro for debugging.asciidoc included in next section + [[enable-metricbeat-debugging]] == Debugging diff --git a/vendor/github.com/elastic/beats/metricbeat/include/list.go b/vendor/github.com/elastic/beats/metricbeat/include/list.go index 251a11b4..8d078255 100644 --- a/vendor/github.com/elastic/beats/metricbeat/include/list.go +++ b/vendor/github.com/elastic/beats/metricbeat/include/list.go @@ -10,6 +10,10 @@ import ( // This list is automatically generated by `make imports` _ 
"github.com/elastic/beats/metricbeat/module/apache" _ "github.com/elastic/beats/metricbeat/module/apache/status" + _ "github.com/elastic/beats/metricbeat/module/couchbase" + _ "github.com/elastic/beats/metricbeat/module/couchbase/bucket" + _ "github.com/elastic/beats/metricbeat/module/couchbase/cluster" + _ "github.com/elastic/beats/metricbeat/module/couchbase/node" _ "github.com/elastic/beats/metricbeat/module/docker" _ "github.com/elastic/beats/metricbeat/module/docker/container" _ "github.com/elastic/beats/metricbeat/module/docker/cpu" @@ -21,6 +25,7 @@ import ( _ "github.com/elastic/beats/metricbeat/module/haproxy/info" _ "github.com/elastic/beats/metricbeat/module/haproxy/stat" _ "github.com/elastic/beats/metricbeat/module/kafka" + _ "github.com/elastic/beats/metricbeat/module/kafka/consumergroup" _ "github.com/elastic/beats/metricbeat/module/kafka/partition" _ "github.com/elastic/beats/metricbeat/module/mongodb" _ "github.com/elastic/beats/metricbeat/module/mongodb/status" @@ -32,6 +37,9 @@ import ( _ "github.com/elastic/beats/metricbeat/module/postgresql/activity" _ "github.com/elastic/beats/metricbeat/module/postgresql/bgwriter" _ "github.com/elastic/beats/metricbeat/module/postgresql/database" + _ "github.com/elastic/beats/metricbeat/module/prometheus" + _ "github.com/elastic/beats/metricbeat/module/prometheus/collector" + _ "github.com/elastic/beats/metricbeat/module/prometheus/stats" _ "github.com/elastic/beats/metricbeat/module/redis" _ "github.com/elastic/beats/metricbeat/module/redis/info" _ "github.com/elastic/beats/metricbeat/module/redis/keyspace" @@ -45,6 +53,7 @@ import ( _ "github.com/elastic/beats/metricbeat/module/system/memory" _ "github.com/elastic/beats/metricbeat/module/system/network" _ "github.com/elastic/beats/metricbeat/module/system/process" + _ "github.com/elastic/beats/metricbeat/module/system/socket" _ "github.com/elastic/beats/metricbeat/module/zookeeper" _ "github.com/elastic/beats/metricbeat/module/zookeeper/mntr" ) diff --git a/vendor/github.com/elastic/beats/metricbeat/mb/builders.go b/vendor/github.com/elastic/beats/metricbeat/mb/builders.go index 921ae5ee..90213315 100644 --- a/vendor/github.com/elastic/beats/metricbeat/mb/builders.go +++ b/vendor/github.com/elastic/beats/metricbeat/mb/builders.go @@ -89,6 +89,11 @@ func newBaseModuleFromConfig(rawConfig *common.Config) (BaseModule, error) { return baseModule, err } + // If timeout is not set, timeout is set to the same value as period + if baseModule.config.Timeout == 0 { + baseModule.config.Timeout = baseModule.config.Period + } + baseModule.name = strings.ToLower(baseModule.config.Module) err = mustNotContainDuplicates(baseModule.config.Hosts) diff --git a/vendor/github.com/elastic/beats/metricbeat/mb/mb.go b/vendor/github.com/elastic/beats/metricbeat/mb/mb.go index 4b1c36f3..ce6c4abf 100644 --- a/vendor/github.com/elastic/beats/metricbeat/mb/mb.go +++ b/vendor/github.com/elastic/beats/metricbeat/mb/mb.go @@ -183,7 +183,6 @@ func (c ModuleConfig) GoString() string { return c.String() } var defaultModuleConfig = ModuleConfig{ Enabled: true, Period: time.Second * 10, - Timeout: time.Second, } // DefaultModuleConfig returns a ModuleConfig with the default values populated. 
diff --git a/vendor/github.com/elastic/beats/metricbeat/mb/mb_test.go b/vendor/github.com/elastic/beats/metricbeat/mb/mb_test.go index a0ba3120..cd605d07 100644 --- a/vendor/github.com/elastic/beats/metricbeat/mb/mb_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/mb/mb_test.go @@ -61,7 +61,7 @@ func TestModuleConfig(t *testing.T) { MetricSets: []string{"test"}, Enabled: true, Period: time.Second * 10, - Timeout: time.Second, + Timeout: 0, }, }, { @@ -124,7 +124,7 @@ func TestModuleConfigDefaults(t *testing.T) { assert.Equal(t, true, mc.Enabled) assert.Equal(t, time.Second*10, mc.Period) - assert.Equal(t, time.Second, mc.Timeout) + assert.Equal(t, time.Second*0, mc.Timeout) assert.Empty(t, mc.Hosts) } @@ -249,7 +249,7 @@ func TestNewBaseModuleFromModuleConfigStruct(t *testing.T) { assert.Equal(t, moduleName, baseModule.Config().Module) assert.Equal(t, true, baseModule.Config().Enabled) assert.Equal(t, time.Second*10, baseModule.Config().Period) - assert.Equal(t, time.Second, baseModule.Config().Timeout) + assert.Equal(t, time.Second*10, baseModule.Config().Timeout) assert.Empty(t, baseModule.Config().Hosts) } diff --git a/vendor/github.com/elastic/beats/metricbeat/mb/testing/data_generator.go b/vendor/github.com/elastic/beats/metricbeat/mb/testing/data_generator.go index ffd21bb3..9a95b48b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/mb/testing/data_generator.go +++ b/vendor/github.com/elastic/beats/metricbeat/mb/testing/data_generator.go @@ -3,6 +3,7 @@ package testing import ( "encoding/json" "flag" + "fmt" "io/ioutil" "os" "testing" @@ -33,15 +34,18 @@ func WriteEvent(f mb.EventFetcher, t *testing.T) error { } func WriteEvents(f mb.EventsFetcher, t *testing.T) error { - if !*dataFlag { t.Skip("Skip data generation tests") } + events, err := f.Fetch() if err != nil { return err } + if len(events) == 0 { + return fmt.Errorf("no events were generated") + } return createEvent(events[0], f) } diff --git a/vendor/github.com/elastic/beats/metricbeat/metricbeat.full.yml b/vendor/github.com/elastic/beats/metricbeat/metricbeat.full.yml index e791cb4b..ef13df49 100644 --- a/vendor/github.com/elastic/beats/metricbeat/metricbeat.full.yml +++ b/vendor/github.com/elastic/beats/metricbeat/metricbeat.full.yml @@ -39,6 +39,9 @@ metricbeat.modules: # Per process stats - process + + # Sockets and connection info (linux only) + #- socket enabled: true period: 10s processes: ['.*'] @@ -49,6 +52,11 @@ metricbeat.modules: # EXPERIMENTAL: cgroups can be enabled for the process metricset. #cgroups: false + # Configure reverse DNS lookup on remote IP addresses in the socket metricset. + #socket.reverse_lookup.enabled: false + #socket.reverse_lookup.success_ttl: 60s + #socket.reverse_lookup.failure_ttl: 60s + #------------------------------- Apache Module ------------------------------- #- module: apache #metricsets: ["status"] @@ -67,6 +75,13 @@ metricbeat.modules: # Password of hosts. 
Empty by default #password: test123 +#------------------------------ Couchbase Module ----------------------------- +#- module: couchbase + #metricsets: ["cluster", "node", "bucket"] + #enabled: true + #period: 10s + #hosts: ["localhost:8091"] + #------------------------------- Docker Module ------------------------------- #- module: docker #metricsets: ["cpu", "info", "memory", "network", "diskio", "container"] @@ -80,7 +95,7 @@ metricbeat.modules: #certificate: "/etc/pki/client/cert.pem" #key: "/etc/pki/client/cert.key" -#------------------------------- haproxy Module ------------------------------ +#------------------------------- HAProxy Module ------------------------------ #- module: haproxy #metricsets: ["info", "stat"] #enabled: true @@ -195,6 +210,15 @@ metricbeat.modules: #password: pass +#----------------------------- Prometheus Module ----------------------------- +#- module: prometheus + #metricsets: ["stats"] + #enabled: true + #period: 10s + #hosts: ["localhost:9090"] + #metrics_path: /metrics + #namespace: example + #-------------------------------- Redis Module ------------------------------- #- module: redis #metricsets: ["info", "keyspace"] @@ -204,9 +228,6 @@ metricbeat.modules: # Redis hosts #hosts: ["127.0.0.1:6379"] - # Enabled defines if the module is enabled. Default: true - #enabled: true - # Timeout after which time a metricset should return an error # Timeout is by default defined as period, as a fetch of a metricset # should never take longer then period, as otherwise calls can pile up. diff --git a/vendor/github.com/elastic/beats/metricbeat/metricbeat.template-es2x.json b/vendor/github.com/elastic/beats/metricbeat/metricbeat.template-es2x.json index 5fb187ca..b000be98 100644 --- a/vendor/github.com/elastic/beats/metricbeat/metricbeat.template-es2x.json +++ b/vendor/github.com/elastic/beats/metricbeat/metricbeat.template-es2x.json @@ -7,7 +7,7 @@ } }, "_meta": { - "version": "5.1.2" + "version": "5.2.2" }, "dynamic_templates": [ { @@ -184,6 +184,392 @@ } } }, + "couchbase": { + "properties": { + "bucket": { + "properties": { + "data": { + "properties": { + "used": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "disk": { + "properties": { + "fetches": { + "type": "long" + }, + "used": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "item_count": { + "type": "long" + }, + "memory": { + "properties": { + "used": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "name": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "ops_per_sec": { + "type": "long" + }, + "quota": { + "properties": { + "ram": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "use": { + "properties": { + "pct": { + "type": "float" + } + } + } + } + }, + "type": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + } + } + }, + "cluster": { + "properties": { + "hdd": { + "properties": { + "free": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "quota": { + "properties": { + "total": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "total": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "used": { + "properties": { + "by_data": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "value": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + } + } + }, + "max_bucket_count": { + "type": "long" + }, + "quota": { + "properties": { + "index_memory": { + "properties": { + "mb": { + 
"type": "long" + } + } + }, + "memory": { + "properties": { + "mb": { + "type": "long" + } + } + } + } + }, + "ram": { + "properties": { + "quota": { + "properties": { + "total": { + "properties": { + "per_node": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "value": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "used": { + "properties": { + "per_node": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "value": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + } + } + }, + "total": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "used": { + "properties": { + "by_data": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "value": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + } + } + } + } + }, + "node": { + "properties": { + "cmd_get": { + "type": "long" + }, + "couch": { + "properties": { + "docs": { + "properties": { + "actual_disk_size": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "data_size": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "spatial": { + "properties": { + "data_size": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "disk_size": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "views": { + "properties": { + "actual_disk_size": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "data_size": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + } + } + }, + "cpu_utilization_rate": { + "properties": { + "pct": { + "type": "float" + } + } + }, + "current_items": { + "properties": { + "total": { + "type": "long" + }, + "value": { + "type": "long" + } + } + }, + "ep_bg_fetched": { + "type": "long" + }, + "get_hits": { + "type": "long" + }, + "hostname": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "mcd_memory": { + "properties": { + "allocated": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "reserved": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "memory": { + "properties": { + "free": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "total": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "used": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "ops": { + "type": "long" + }, + "swap": { + "properties": { + "total": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "used": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "uptime": { + "properties": { + "sec": { + "type": "long" + } + } + }, + "vb_replica_curr_items": { + "type": "long" + } + } + } + } + }, "docker": { "properties": { "container": { @@ -925,6 +1311,71 @@ }, "kafka": { "properties": { + "consumergroup": { + "properties": { + "broker": { + "properties": { + "address": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "id": { + "type": "long" + } + } + }, + "client": { + "properties": { + "host": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "id": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "member_id": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + } + } + }, + "error": { + "properties": { + "code": { + "type": "long" + } + } + }, + "id": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "meta": { + "index": "analyzed", + "norms": { + "enabled": 
false + }, + "type": "string" + }, + "offset": { + "type": "long" + }, + "partition": { + "type": "long" + }, + "topic": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + } + } + }, "partition": { "properties": { "broker": { @@ -1047,6 +1498,11 @@ "index": "not_analyzed", "type": "string" }, + "namespace": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, "rtt": { "type": "long" } @@ -1494,6 +1950,22 @@ } } }, + "command": { + "properties": { + "delete": { + "type": "long" + }, + "insert": { + "type": "long" + }, + "select": { + "type": "long" + }, + "update": { + "type": "long" + } + } + }, "connections": { "type": "long" }, @@ -1839,6 +2311,38 @@ } } }, + "prometheus": { + "properties": { + "stats": { + "properties": { + "notifications": { + "properties": { + "dropped": { + "type": "long" + }, + "queue_length": { + "type": "long" + } + } + }, + "processes": { + "properties": { + "open_fds": { + "type": "long" + } + } + }, + "storage": { + "properties": { + "chunks_to_persist": { + "type": "long" + } + } + } + } + } + } + }, "redis": { "properties": { "info": { @@ -2326,6 +2830,9 @@ }, "cpu": { "properties": { + "cores": { + "type": "long" + }, "idle": { "properties": { "pct": { @@ -3106,6 +3613,93 @@ "type": "string" } } + }, + "socket": { + "properties": { + "direction": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "family": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "local": { + "properties": { + "ip": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "port": { + "type": "long" + } + } + }, + "process": { + "properties": { + "cmdline": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "command": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "exe": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "pid": { + "type": "long" + } + } + }, + "remote": { + "properties": { + "etld_plus_one": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "host": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "host_error": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "ip": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "port": { + "type": "long" + } + } + }, + "user": { + "properties": { + "id": { + "type": "long" + }, + "name": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + } + } + } + } } } }, diff --git a/vendor/github.com/elastic/beats/metricbeat/metricbeat.template.json b/vendor/github.com/elastic/beats/metricbeat/metricbeat.template.json index 4e5c330d..dc3ba050 100644 --- a/vendor/github.com/elastic/beats/metricbeat/metricbeat.template.json +++ b/vendor/github.com/elastic/beats/metricbeat/metricbeat.template.json @@ -5,7 +5,7 @@ "norms": false }, "_meta": { - "version": "5.1.2" + "version": "5.2.2" }, "dynamic_templates": [ { @@ -188,6 +188,391 @@ } } }, + "couchbase": { + "properties": { + "bucket": { + "properties": { + "data": { + "properties": { + "used": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "disk": { + "properties": { + "fetches": { + "type": "long" + }, + "used": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "item_count": { + "type": "long" + }, + "memory": { + "properties": { + "used": { + "properties": { + "bytes": { + "type": "long" + } + } + 
} + } + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "ops_per_sec": { + "type": "long" + }, + "quota": { + "properties": { + "ram": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "use": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + } + } + } + } + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "cluster": { + "properties": { + "hdd": { + "properties": { + "free": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "quota": { + "properties": { + "total": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "total": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "used": { + "properties": { + "by_data": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "value": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + } + } + }, + "max_bucket_count": { + "type": "long" + }, + "quota": { + "properties": { + "index_memory": { + "properties": { + "mb": { + "type": "long" + } + } + }, + "memory": { + "properties": { + "mb": { + "type": "long" + } + } + } + } + }, + "ram": { + "properties": { + "quota": { + "properties": { + "total": { + "properties": { + "per_node": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "value": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "used": { + "properties": { + "per_node": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "value": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + } + } + }, + "total": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "used": { + "properties": { + "by_data": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "value": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + } + } + } + } + }, + "node": { + "properties": { + "cmd_get": { + "type": "long" + }, + "couch": { + "properties": { + "docs": { + "properties": { + "actual_disk_size": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "data_size": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "spatial": { + "properties": { + "data_size": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "disk_size": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "views": { + "properties": { + "actual_disk_size": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "data_size": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + } + } + }, + "cpu_utilization_rate": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + } + } + }, + "current_items": { + "properties": { + "total": { + "type": "long" + }, + "value": { + "type": "long" + } + } + }, + "ep_bg_fetched": { + "type": "long" + }, + "get_hits": { + "type": "long" + }, + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "mcd_memory": { + "properties": { + "allocated": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "reserved": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "memory": { + "properties": { + "free": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "total": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "used": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "ops": { + "type": "long" + }, + "swap": { + "properties": { + "total": { + "properties": { + "bytes": { + "type": "long" + } + 
} + }, + "used": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "uptime": { + "properties": { + "sec": { + "type": "long" + } + } + }, + "vb_replica_curr_items": { + "type": "long" + } + } + } + } + }, "docker": { "properties": { "container": { @@ -933,6 +1318,62 @@ }, "kafka": { "properties": { + "consumergroup": { + "properties": { + "broker": { + "properties": { + "address": { + "ignore_above": 1024, + "type": "keyword" + }, + "id": { + "type": "long" + } + } + }, + "client": { + "properties": { + "host": { + "ignore_above": 1024, + "type": "keyword" + }, + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "member_id": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "error": { + "properties": { + "code": { + "type": "long" + } + } + }, + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "meta": { + "norms": false, + "type": "text" + }, + "offset": { + "type": "long" + }, + "partition": { + "type": "long" + }, + "topic": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, "partition": { "properties": { "broker": { @@ -1044,6 +1485,10 @@ "ignore_above": 1024, "type": "keyword" }, + "namespace": { + "ignore_above": 1024, + "type": "keyword" + }, "rtt": { "type": "long" } @@ -1489,6 +1934,22 @@ } } }, + "command": { + "properties": { + "delete": { + "type": "long" + }, + "insert": { + "type": "long" + }, + "select": { + "type": "long" + }, + "update": { + "type": "long" + } + } + }, "connections": { "type": "long" }, @@ -1825,6 +2286,38 @@ } } }, + "prometheus": { + "properties": { + "stats": { + "properties": { + "notifications": { + "properties": { + "dropped": { + "type": "long" + }, + "queue_length": { + "type": "long" + } + } + }, + "processes": { + "properties": { + "open_fds": { + "type": "long" + } + } + }, + "storage": { + "properties": { + "chunks_to_persist": { + "type": "long" + } + } + } + } + } + } + }, "redis": { "properties": { "info": { @@ -2309,6 +2802,9 @@ }, "cpu": { "properties": { + "cores": { + "type": "long" + }, "idle": { "properties": { "pct": { @@ -3090,6 +3586,80 @@ "type": "keyword" } } + }, + "socket": { + "properties": { + "direction": { + "ignore_above": 1024, + "type": "keyword" + }, + "family": { + "ignore_above": 1024, + "type": "keyword" + }, + "local": { + "properties": { + "ip": { + "type": "ip" + }, + "port": { + "type": "long" + } + } + }, + "process": { + "properties": { + "cmdline": { + "ignore_above": 1024, + "type": "keyword" + }, + "command": { + "ignore_above": 1024, + "type": "keyword" + }, + "exe": { + "ignore_above": 1024, + "type": "keyword" + }, + "pid": { + "type": "long" + } + } + }, + "remote": { + "properties": { + "etld_plus_one": { + "ignore_above": 1024, + "type": "keyword" + }, + "host": { + "ignore_above": 1024, + "type": "keyword" + }, + "host_error": { + "ignore_above": 1024, + "type": "keyword" + }, + "ip": { + "type": "ip" + }, + "port": { + "type": "long" + } + } + }, + "user": { + "properties": { + "id": { + "type": "long" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } } } }, @@ -3182,6 +3752,7 @@ }, "order": 0, "settings": { + "index.mapping.total_fields.limit": 10000, "index.refresh_interval": "5s" }, "template": "metricbeat-*" diff --git a/vendor/github.com/elastic/beats/metricbeat/metricbeat.yml b/vendor/github.com/elastic/beats/metricbeat/metricbeat.yml index 54328186..657ea618 100644 --- a/vendor/github.com/elastic/beats/metricbeat/metricbeat.yml +++ b/vendor/github.com/elastic/beats/metricbeat/metricbeat.yml @@ -39,6 +39,9 @@ 
metricbeat.modules: # Per process stats - process + + # Sockets (linux only) + #- socket enabled: true period: 10s processes: ['.*'] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/Dockerfile new file mode 100644 index 00000000..51409c59 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/Dockerfile @@ -0,0 +1,5 @@ +FROM couchbase:4.5.1 + +COPY configure-node.sh /opt/couchbase + +CMD ["/opt/couchbase/configure-node.sh"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/config.yml new file mode 100644 index 00000000..c6bb0df5 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/config.yml @@ -0,0 +1,5 @@ +#- module: couchbase + #metricsets: ["cluster", "node", "bucket"] + #enabled: true + #period: 10s + #hosts: ["localhost:8091"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/configure-node.sh b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/configure-node.sh new file mode 100755 index 00000000..467b3e33 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/configure-node.sh @@ -0,0 +1,38 @@ +set -e +set -m + +/entrypoint.sh couchbase-server & + +sleep 1 + +waitForCouchbase() { + echo -n "Waiting for Couchbase to start." + for ((i=1; i<=90; i++)) do + if nc -vz 127.0.0.1 8091 2>/dev/null; then + return 0 + fi + + ((i++)) + echo -n '.' + sleep 1 + done + + echo + echo >&2 "Failed to Start Couchbase" +} + +waitForCouchbase + +# Setup index and memory quota +curl -v -X POST http://127.0.0.1:8091/pools/default -d memoryQuota=300 -d indexMemoryQuota=300 + +# Setup services +curl -v http://127.0.0.1:8091/node/controller/setupServices -d services=kv%2Cn1ql%2Cindex + +# Setup credentials +curl -v http://127.0.0.1:8091/settings/web -d port=8091 -d username=Administrator -d password=password + +# Load travel-sample bucket +curl -v -u Administrator:password -X POST http://127.0.0.1:8091/sampleBuckets/install -d '["beer-sample"]' + +fg 1 diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/docs.asciidoc new file mode 100644 index 00000000..54d53679 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/docs.asciidoc @@ -0,0 +1,6 @@ +== Couchbase Module + +experimental[] + +This module periodically fetches metrics from https://www.couchbase.com/[Couchbase] +servers. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/fields.yml new file mode 100644 index 00000000..706cacf7 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/fields.yml @@ -0,0 +1,13 @@ +- key: couchbase + title: "Couchbase" + description: > + experimental[] + + Metrics collected from Couchbase servers. + short_config: false + fields: + - name: couchbase + type: group + description: > + `couchbase` contains the metrics that were scraped from Couchbase. 
+ fields: diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/_meta/data.json new file mode 100644 index 00000000..16f70bef --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/_meta/data.json @@ -0,0 +1,45 @@ +{ + "@timestamp": "2016-05-23T08:05:34.853Z", + "beat": { + "hostname": "host.example.com", + "name": "host.example.com" + }, + "couchbase": { + "bucket": { + "data": { + "used": { + "bytes": 12597798 + } + }, + "disk": { + "fetches": 0, + "used": { + "bytes": 16368995 + } + }, + "item_count": 7303, + "memory": { + "used": { + "bytes": 53962160 + } + }, + "name": "beer-sample", + "ops_per_sec": 0, + "quota": { + "ram": { + "bytes": 104857600 + }, + "use": { + "pct": 51.46232604980469 + } + } + } + }, + "metricset": { + "host": "couchbase:8091", + "module": "couchbase", + "name": "bucket", + "rtt": 115 + }, + "type": "membase" +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/_meta/docs.asciidoc new file mode 100644 index 00000000..e7f208b8 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/_meta/docs.asciidoc @@ -0,0 +1,4 @@ +=== Couchbase Bucket Metricset + +The Couchbase `bucket` metricset collects data from the Couchbase Buckets Rest API +`http://couchbasehost:8091/pools/default/buckets`. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/_meta/fields.yml new file mode 100644 index 00000000..a9c3fc46 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/_meta/fields.yml @@ -0,0 +1,50 @@ +- name: bucket + type: group + description: > + Couchbase bucket metrics. + fields: + - name: name + type: keyword + description: > + Name of the bucket. + - name: type + type: keyword + description: > + Type of the bucket. + - name: data.used.bytes + format: bytes + type: long + description: > + Size of user data within buckets of the specified state that are resident in RAM. + - name: disk.fetches + type: long + description: > + Number of disk fetches. + - name: disk.used.bytes + format: bytes + type: long + description: > + Amount of disk used (bytes). + - name: memory.used.bytes + format: bytes + type: long + description: > + Amount of memory used by the bucket (bytes). + - name: quota.ram.bytes + format: bytes + type: long + description: > + Amount of RAM used by the bucket (bytes). + - name: quota.use.pct + format: percent + type: scaled_float + description: > + Percentage of RAM used (for active objects) against the configured bucket size (%). + - name: ops_per_sec + type: long + description: > + Number of operations per second. + - name: item_count + type: long + description: > + Number of items associated with the bucket. 
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/bucket.go b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/bucket.go new file mode 100644 index 00000000..6d95b315 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/bucket.go @@ -0,0 +1,83 @@ +package bucket + +import ( + "fmt" + "net/http" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/metricbeat/mb/parse" +) + +const ( + defaultScheme = "http" + defaultPath = "/pools/default/buckets" +) + +var ( + hostParser = parse.URLHostParserBuilder{ + DefaultScheme: defaultScheme, + DefaultPath: defaultPath, + }.Build() +) + +// init registers the MetricSet with the central registry. +// The New method will be called after the setup of the module and before starting to fetch data +func init() { + if err := mb.Registry.AddMetricSet("couchbase", "bucket", New, hostParser); err != nil { + panic(err) + } +} + +// MetricSet type defines all fields of the MetricSet +// As a minimum it must inherit the mb.BaseMetricSet fields, but can be extended with +// additional entries. These variables can be used to persist data or configuration between +// multiple fetch calls. +type MetricSet struct { + mb.BaseMetricSet + client *http.Client +} + +// New create a new instance of the MetricSet +// Part of new is also setting up the configuration by processing additional +// configuration entries if needed. +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + logp.Warn("EXPERIMENTAL: The couchbase bucket metricset is experimental") + + config := struct{}{} + + if err := base.Module().UnpackConfig(&config); err != nil { + return nil, err + } + + return &MetricSet{ + BaseMetricSet: base, + client: &http.Client{Timeout: base.Module().Config().Timeout}, + }, nil +} + +// Fetch methods implements the data gathering and data conversion to the right format +// It returns the event which is then forward to the output. In case of an error, a +// descriptive error must be returned. 
+func (m *MetricSet) Fetch() ([]common.MapStr, error) { + req, err := http.NewRequest("GET", m.HostData().SanitizedURI, nil) + if m.HostData().User != "" || m.HostData().Password != "" { + req.SetBasicAuth(m.HostData().User, m.HostData().Password) + } + + resp, err := m.client.Do(req) + + if err != nil { + return nil, fmt.Errorf("error making http request: %v", err) + } + + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Error Connecting to Couchbase %d: %s", resp.StatusCode, resp.Status) + } + + return eventsMapping(resp.Body), nil + +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/bucket_integration_test.go b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/bucket_integration_test.go new file mode 100644 index 00000000..e7f909d1 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/bucket_integration_test.go @@ -0,0 +1,27 @@ +// +build integration + +package bucket + +import ( + "testing" + + mbtest "github.com/elastic/beats/metricbeat/mb/testing" + "github.com/elastic/beats/metricbeat/module/couchbase" +) + +func TestData(t *testing.T) { + f := mbtest.NewEventsFetcher(t, getConfig()) + + err := mbtest.WriteEvents(f, t) + if err != nil { + t.Fatal("write", err) + } +} + +func getConfig() map[string]interface{} { + return map[string]interface{}{ + "module": "couchbase", + "metricsets": []string{"bucket"}, + "hosts": []string{couchbase.GetEnvDSN()}, + } +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/bucket_test.go b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/bucket_test.go new file mode 100644 index 00000000..0d849d63 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/bucket_test.go @@ -0,0 +1,70 @@ +// +build !integration + +package bucket + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "path/filepath" + "testing" + + "github.com/elastic/beats/libbeat/common" + mbtest "github.com/elastic/beats/metricbeat/mb/testing" + + "github.com/stretchr/testify/assert" +) + +func TestFetchEventContents(t *testing.T) { + absPath, err := filepath.Abs("./testdata/") + // response is a raw response from a couchbase + response, err := ioutil.ReadFile(absPath + "/sample_response.json") + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Header().Set("Content-Type", "appication/json;") + w.Write([]byte(response)) + })) + defer server.Close() + + config := map[string]interface{}{ + "module": "couchbase", + "metricsets": []string{"bucket"}, + "hosts": []string{server.URL}, + } + + f := mbtest.NewEventsFetcher(t, config) + events, err := f.Fetch() + event := events[0] + if !assert.NoError(t, err) { + t.FailNow() + } + + t.Logf("%s/%s event: %+v", f.Module().Name(), f.Name(), event.StringToPrint()) + + assert.EqualValues(t, "membase", event["type"]) + assert.EqualValues(t, "beer-sample", event["name"]) + + data := event["data"].(common.MapStr) + data_used := data["used"].(common.MapStr) + assert.EqualValues(t, 12597731, data_used["bytes"]) + + disk := event["disk"].(common.MapStr) + assert.EqualValues(t, 0, disk["fetches"]) + + disk_used := disk["used"].(common.MapStr) + assert.EqualValues(t, 16369008, disk_used["bytes"]) + + memory := event["memory"].(common.MapStr) + memory_used := memory["used"].(common.MapStr) + assert.EqualValues(t, 53962160, memory_used["bytes"]) + + quota := event["quota"].(common.MapStr) + 
quota_ram := quota["ram"].(common.MapStr) + assert.EqualValues(t, 104857600, quota_ram["bytes"]) + + quota_use := quota["use"].(common.MapStr) + assert.EqualValues(t, 51.46232604980469, quota_use["pct"]) + + assert.EqualValues(t, 7303, event["item_count"]) + assert.EqualValues(t, 0, event["ops_per_sec"]) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/data.go b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/data.go new file mode 100644 index 00000000..e026d0b5 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/data.go @@ -0,0 +1,79 @@ +package bucket + +import ( + "encoding/json" + "io" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" +) + +type BucketQuota struct { + RAM int64 `json:"ram"` + RawRAM int64 `json:"rawRAM"` +} + +type BucketBasicStats struct { + QuotaPercentUsed float64 `json:"quotaPercentUsed"` + OpsPerSec int64 `json:"opsPerSec"` + DiskFetches int64 `json:"diskFetches"` + ItemCount int64 `json:"itemCount"` + DiskUsed int64 `json:"diskUsed"` + DataUsed int64 `json:"dataUsed"` + MemUsed int64 `json:"memUsed"` +} + +type Buckets []struct { + Name string `json:"name"` + BucketType string `json:"bucketType"` + Quota BucketQuota `json:"quota"` + BasicStats BucketBasicStats `json:"basicStats"` +} + +func eventsMapping(body io.Reader) []common.MapStr { + + var d Buckets + err := json.NewDecoder(body).Decode(&d) + if err != nil { + logp.Err("Error: ", err) + } + + events := []common.MapStr{} + + for _, Bucket := range d { + event := common.MapStr{ + "name": Bucket.Name, + "type": Bucket.BucketType, + "data": common.MapStr{ + "used": common.MapStr{ + "bytes": Bucket.BasicStats.DataUsed, + }, + }, + "disk": common.MapStr{ + "fetches": Bucket.BasicStats.DiskFetches, + "used": common.MapStr{ + "bytes": Bucket.BasicStats.DiskUsed, + }, + }, + "memory": common.MapStr{ + "used": common.MapStr{ + "bytes": Bucket.BasicStats.MemUsed, + }, + }, + "quota": common.MapStr{ + "ram": common.MapStr{ + "bytes": Bucket.Quota.RAM, + }, + "use": common.MapStr{ + "pct": Bucket.BasicStats.QuotaPercentUsed, + }, + }, + "ops_per_sec": Bucket.BasicStats.OpsPerSec, + "item_count": Bucket.BasicStats.ItemCount, + } + + events = append(events, event) + } + + return events +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/testdata/sample_response.json b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/testdata/sample_response.json new file mode 100644 index 00000000..5eedc620 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/testdata/sample_response.json @@ -0,0 +1,4218 @@ +[ + { + "name": "beer-sample", + "bucketType": "membase", + "authType": "sasl", + "saslPassword": "", + "proxyPort": 0, + "replicaIndex": false, + "uri": "/pools/default/buckets/beer-sample?bucket_uuid=17f61761bfd479f2aa0ab7be01e0c6f6", + "streamingUri": "/pools/default/bucketsStreaming/beer-sample?bucket_uuid=17f61761bfd479f2aa0ab7be01e0c6f6", + "localRandomKeyUri": "/pools/default/buckets/beer-sample/localRandomKey", + "controllers": { + "compactAll": "/pools/default/buckets/beer-sample/controller/compactBucket", + "compactDB": "/pools/default/buckets/default/controller/compactDatabases", + "purgeDeletes": "/pools/default/buckets/beer-sample/controller/unsafePurgeBucket", + "startRecovery": "/pools/default/buckets/beer-sample/controller/startRecovery" + }, + "nodes": [ + { + "couchApiBaseHTTPS": 
"https://172.17.0.2:18092/beer-sample%2B17f61761bfd479f2aa0ab7be01e0c6f6", + "couchApiBase": "http://172.17.0.2:8092/beer-sample%2B17f61761bfd479f2aa0ab7be01e0c6f6", + "systemStats": { + "cpu_utilization_rate": 4.199475065616798, + "swap_total": 4189057024, + "swap_used": 28672, + "mem_total": 8359174144, + "mem_free": 4839604224 + }, + "interestingStats": { + "cmd_get": 0, + "couch_docs_actual_disk_size": 13563789, + "couch_docs_data_size": 9792512, + "couch_spatial_data_size": 0, + "couch_spatial_disk_size": 0, + "couch_views_actual_disk_size": 2805219, + "couch_views_data_size": 2805219, + "curr_items": 7303, + "curr_items_tot": 7303, + "ep_bg_fetched": 0, + "get_hits": 0, + "mem_used": 53962160, + "ops": 0, + "vb_replica_curr_items": 0 + }, + "uptime": "4815", + "memoryTotal": 8359174144, + "memoryFree": 4839604224, + "mcdMemoryReserved": 6377, + "mcdMemoryAllocated": 6377, + "replication": 0, + "clusterMembership": "active", + "recoveryType": "none", + "status": "healthy", + "otpNode": "ns_1@127.0.0.1", + "thisNode": true, + "hostname": "172.17.0.2:8091", + "clusterCompatibility": 262149, + "version": "4.5.1-2844-enterprise", + "os": "x86_64-unknown-linux-gnu", + "ports": { + "sslProxy": 11214, + "httpsMgmt": 18091, + "httpsCAPI": 18092, + "proxy": 11211, + "direct": 11210 + }, + "services": [ + "index", + "kv", + "n1ql" + ] + } + ], + "stats": { + "uri": "/pools/default/buckets/beer-sample/stats", + "directoryURI": "/pools/default/buckets/beer-sample/statsDirectory", + "nodeStatsListURI": "/pools/default/buckets/beer-sample/nodes" + }, + "ddocs": { + "uri": "/pools/default/buckets/beer-sample/ddocs" + }, + "nodeLocator": "vbucket", + "autoCompactionSettings": false, + "uuid": "17f61761bfd479f2aa0ab7be01e0c6f6", + "vBucketServerMap": { + "hashAlgorithm": "CRC", + "numReplicas": 1, + "serverList": [ + "172.17.0.2:11210" + ], + "vBucketMap": [ + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ 
+ 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 
0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 
0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 
0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 
0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 0, + -1 + ], + [ + 
0, + -1 + ] + ] + }, + "replicaNumber": 1, + "threadsNumber": 3, + "quota": { + "ram": 104857600, + "rawRAM": 104857600 + }, + "basicStats": { + "quotaPercentUsed": 51.46232604980469, + "opsPerSec": 0, + "diskFetches": 0, + "itemCount": 7303, + "diskUsed": 16369008, + "dataUsed": 12597731, + "memUsed": 53962160 + }, + "evictionPolicy": "valueOnly", + "timeSynchronization": "disabled", + "bucketCapabilitiesVer": "", + "bucketCapabilities": [ + "cbhello", + "touch", + "couchapi", + "cccp", + "xdcrCheckpointing", + "nodesExt", + "dcp" + ] + } +] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/_meta/data.json new file mode 100644 index 00000000..f0903757 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/_meta/data.json @@ -0,0 +1,79 @@ +{ + "@timestamp": "2016-05-23T08:05:34.853Z", + "beat": { + "hostname": "host.example.com", + "name": "host.example.com" + }, + "couchbase": { + "cluster": { + "hdd": { + "free": { + "bytes": 50705599693 + }, + "quota": { + "total": { + "bytes": 63381999616 + } + }, + "total": { + "bytes": 63381999616 + }, + "used": { + "by_data": { + "bytes": 16368995 + }, + "value": { + "bytes": 12676399923 + } + } + }, + "max_bucket_count": 10, + "quota": { + "index_memory": { + "mb": 300 + }, + "memory": { + "mb": 300 + } + }, + "ram": { + "quota": { + "total": { + "per_node": { + "bytes": 314572800 + }, + "value": { + "bytes": 314572800 + } + }, + "used": { + "per_node": { + "bytes": 104857600 + }, + "value": { + "bytes": 104857600 + } + } + }, + "total": { + "bytes": 8360542208 + }, + "used": { + "by_data": { + "bytes": 53962160 + }, + "value": { + "bytes": 7660535808 + } + } + } + } + }, + "metricset": { + "host": "couchbase:8091", + "module": "couchbase", + "name": "cluster", + "rtt": 115 + }, + "type": "metricsets" +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/_meta/docs.asciidoc new file mode 100644 index 00000000..cab2d3a9 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/_meta/docs.asciidoc @@ -0,0 +1,5 @@ +=== Couchbase Cluster Metricset + +The Couchbase `cluster` metricset collects data from Couchbase pools Rest API +`http://couchbasehost:8091/pools/default` and creates an event +containing cluster metrics. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/_meta/fields.yml new file mode 100644 index 00000000..9107e1af --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/_meta/fields.yml @@ -0,0 +1,77 @@ +- name: cluster + type: group + description: > + Couchbase cluster metrics. + fields: + - name: hdd.free.bytes + format: bytes + type: long + description: > + Free hard drive space in the cluster (bytes). + - name: hdd.quota.total.bytes + format: bytes + type: long + description: > + Hard drive quota total for the cluster (bytes). + - name: hdd.total.bytes + format: bytes + type: long + description: > + Total hard drive space available to the cluster (bytes). + - name: hdd.used.value.bytes + format: bytes + type: long + description: > + Hard drive space used by the cluster (bytes). 
+ - name: hdd.used.by_data.bytes + format: bytes + type: long + description: > + Hard drive space used by the data in the cluster (bytes). + - name: max_bucket_count + type: long + description: > + Max bucket count setting. + - name: quota.index_memory.mb + type: long + description: > + Memory quota setting for the Index service (Mbyte). + - name: quota.memory.mb + type: long + description: > + Memory quota setting for the cluster (Mbyte). + - name: ram.quota.total.value.bytes + format: bytes + type: long + description: > + RAM quota total for the cluster (bytes). + - name: ram.quota.total.per_node.bytes + format: bytes + type: long + description: > + RAM quota used by the current node in the cluster (bytes). + - name: ram.quota.used.value.bytes + format: bytes + type: long + description: > + RAM quota used by the cluster (bytes). + - name: ram.quota.used.per_node.bytes + format: bytes + type: long + description: > + Ram quota used by the current node in the cluster (bytes) + - name: ram.total.bytes + format: bytes + type: long + description: > + Total RAM available to cluster (bytes). + - name: ram.used.value.bytes + format: bytes + type: long + description: > + RAM used by the cluster (bytes). + - name: ram.used.by_data.bytes + format: bytes + type: long + description: > + RAM used by the data in the cluster (bytes). diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/cluster.go b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/cluster.go new file mode 100644 index 00000000..f26e0420 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/cluster.go @@ -0,0 +1,82 @@ +package cluster + +import ( + "fmt" + "net/http" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/metricbeat/mb/parse" +) + +const ( + defaultScheme = "http" + defaultPath = "/pools/default" +) + +var ( + hostParser = parse.URLHostParserBuilder{ + DefaultScheme: defaultScheme, + DefaultPath: defaultPath, + }.Build() +) + +// init registers the MetricSet with the central registry. +// The New method will be called after the setup of the module and before starting to fetch data +func init() { + if err := mb.Registry.AddMetricSet("couchbase", "cluster", New, hostParser); err != nil { + panic(err) + } +} + +// MetricSet type defines all fields of the MetricSet +// As a minimum it must inherit the mb.BaseMetricSet fields, but can be extended with +// additional entries. These variables can be used to persist data or configuration between +// multiple fetch calls. +type MetricSet struct { + mb.BaseMetricSet + client *http.Client +} + +// New create a new instance of the MetricSet +// Part of new is also setting up the configuration by processing additional +// configuration entries if needed. +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + logp.Warn("EXPERIMENTAL: The couchbase cluster metricset is experimental") + + config := struct{}{} + + if err := base.Module().UnpackConfig(&config); err != nil { + return nil, err + } + + return &MetricSet{ + BaseMetricSet: base, + client: &http.Client{Timeout: base.Module().Config().Timeout}, + }, nil +} + +// Fetch methods implements the data gathering and data conversion to the right format +// It returns the event which is then forward to the output. In case of an error, a +// descriptive error must be returned. 
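+//
+// Hosts may be configured with credentials embedded in the URL, for example
+// "http://user:secret@couchbase:8091" (example values only). The host parser
+// declared above is expected to split such a URL into HostData().User,
+// HostData().Password and a credential-free SanitizedURI; the request below is
+// built from SanitizedURI and basic auth is applied only when credentials are set.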
+func (m *MetricSet) Fetch() (common.MapStr, error) { + req, err := http.NewRequest("GET", m.HostData().SanitizedURI, nil) + if m.HostData().User != "" || m.HostData().Password != "" { + req.SetBasicAuth(m.HostData().User, m.HostData().Password) + } + + resp, err := m.client.Do(req) + + if err != nil { + return nil, fmt.Errorf("error making http request: %v", err) + } + + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Error Connecting to Couchbase %d: %s", resp.StatusCode, resp.Status) + } + + return eventMapping(resp.Body), nil +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/cluster_integration_test.go b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/cluster_integration_test.go new file mode 100644 index 00000000..a5e480fc --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/cluster_integration_test.go @@ -0,0 +1,27 @@ +// +build integration + +package cluster + +import ( + "testing" + + mbtest "github.com/elastic/beats/metricbeat/mb/testing" + "github.com/elastic/beats/metricbeat/module/couchbase" +) + +func TestData(t *testing.T) { + f := mbtest.NewEventFetcher(t, getConfig()) + + err := mbtest.WriteEvent(f, t) + if err != nil { + t.Fatal("write", err) + } +} + +func getConfig() map[string]interface{} { + return map[string]interface{}{ + "module": "couchbase", + "metricsets": []string{"cluster"}, + "hosts": []string{couchbase.GetEnvDSN()}, + } +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/cluster_test.go b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/cluster_test.go new file mode 100644 index 00000000..97c202d4 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/cluster_test.go @@ -0,0 +1,97 @@ +// +build !integration + +package cluster + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "path/filepath" + "testing" + + mbtest "github.com/elastic/beats/metricbeat/mb/testing" + + "github.com/elastic/beats/libbeat/common" + "github.com/stretchr/testify/assert" +) + +func TestFetchEventContents(t *testing.T) { + absPath, err := filepath.Abs("./testdata/") + // response is a raw response from a couchbase + response, err := ioutil.ReadFile(absPath + "/sample_response.json") + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Header().Set("Content-Type", "appication/json;") + w.Write([]byte(response)) + })) + defer server.Close() + + config := map[string]interface{}{ + "module": "couchbase", + "metricsets": []string{"cluster"}, + "hosts": []string{server.URL}, + } + + f := mbtest.NewEventFetcher(t, config) + event, err := f.Fetch() + if !assert.NoError(t, err) { + t.FailNow() + } + + t.Logf("%s/%s event: %+v", f.Module().Name(), f.Name(), event.StringToPrint()) + + hdd := event["hdd"].(common.MapStr) + hdd_free := hdd["free"].(common.MapStr) + assert.EqualValues(t, 46902679716, hdd_free["bytes"]) + + hdd_total := hdd["total"].(common.MapStr) + assert.EqualValues(t, 63381999616, hdd_total["bytes"]) + + hdd_used := hdd["used"].(common.MapStr) + hdd_used_value := hdd_used["value"].(common.MapStr) + assert.EqualValues(t, 16479319900, hdd_used_value["bytes"]) + + hdd_used_by_data := hdd_used["by_data"].(common.MapStr) + assert.EqualValues(t, 16369010, hdd_used_by_data["bytes"]) + + hdd_quota := hdd["quota"].(common.MapStr) + hdd_quota_total := hdd_quota["total"].(common.MapStr) + assert.EqualValues(t, 
63381999616, hdd_quota_total["bytes"]) + + assert.EqualValues(t, 10, event["max_bucket_count"]) + + quota := event["quota"].(common.MapStr) + quota_index_memory := quota["index_memory"].(common.MapStr) + assert.EqualValues(t, 300, quota_index_memory["mb"]) + + quota_memory := quota["memory"].(common.MapStr) + assert.EqualValues(t, 300, quota_memory["mb"]) + + ram := event["ram"].(common.MapStr) + + ram_quota := ram["quota"].(common.MapStr) + + ram_quota_total := ram_quota["total"].(common.MapStr) + ram_quota_total_value := ram_quota_total["value"].(common.MapStr) + assert.EqualValues(t, 314572800, ram_quota_total_value["bytes"]) + + ram_quota_total_per_node := ram_quota_total["per_node"].(common.MapStr) + assert.EqualValues(t, 314572800, ram_quota_total_per_node["bytes"]) + + ram_quota_used := ram_quota["used"].(common.MapStr) + ram_quota_used_value := ram_quota_used["value"].(common.MapStr) + assert.EqualValues(t, 104857600, ram_quota_used_value["bytes"]) + + ram_quota_used_per_node := ram_quota_used["per_node"].(common.MapStr) + assert.EqualValues(t, 104857600, ram_quota_used_per_node["bytes"]) + + ram_total := ram["total"].(common.MapStr) + assert.EqualValues(t, 8359174144, ram_total["bytes"]) + + ram_used := ram["used"].(common.MapStr) + ram_used_value := ram_used["value"].(common.MapStr) + assert.EqualValues(t, 8004751360, ram_used_value["bytes"]) + + ram_used_by_data := ram_used["by_data"].(common.MapStr) + assert.EqualValues(t, 53962016, ram_used_by_data["bytes"]) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/data.go b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/data.go new file mode 100644 index 00000000..9d885e7f --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/data.go @@ -0,0 +1,118 @@ +package cluster + +import ( + "encoding/json" + "io" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" +) + +type StorageTotals_Ram struct { + Total int64 `json:"total"` + QuotaTotal int64 `json:"quotaTotal"` + QuotaUsed int64 `json:"quotaUsed"` + Used int64 `json:"used"` + UsedByData int64 `json:"usedByData"` + QuotaUsedPerNode int64 `json:"quotaUsedPerNode"` + QuotaTotalPerNode int64 `json:"quotaTotalPerNode"` +} +type StorageTotals_Hdd struct { + Total int64 `json:"total"` + QuotaTotal int64 `json:"quotaTotal"` + Used int64 `json:"used"` + UsedByData int64 `json:"usedByData"` + Free int64 `json:"free"` +} + +type StorageTotals struct { + RAM StorageTotals_Ram `json:"ram"` + Hdd StorageTotals_Hdd `json:"hdd"` +} + +type Data struct { + StorageTotals StorageTotals `json:"storageTotals"` + IndexMemoryQuota int64 `json:"indexMemoryQuota"` + MemoryQuota int64 `json:"memoryQuota"` + RebalanceStatus string `json:"rebalanceStatus"` + RebalanceProgressURI string `json:"rebalanceProgressUri"` + StopRebalanceURI string `json:"stopRebalanceUri"` + NodeStatusesURI string `json:"nodeStatusesUri"` + MaxBucketCount int64 `json:"maxBucketCount"` +} + +func eventMapping(body io.Reader) common.MapStr { + + var d Data + err := json.NewDecoder(body).Decode(&d) + if err != nil { + logp.Err("Error: ", err) + } + + logp.Info("Printing Data:") + event := common.MapStr{ + "hdd": common.MapStr{ + "quota": common.MapStr{ + "total": common.MapStr{ + "bytes": d.StorageTotals.Hdd.QuotaTotal, + }, + }, + "free": common.MapStr{ + "bytes": d.StorageTotals.Hdd.Free, + }, + "total": common.MapStr{ + "bytes": d.StorageTotals.Hdd.Total, + }, + "used": common.MapStr{ + "value": common.MapStr{ + 
"bytes": d.StorageTotals.Hdd.Used, + }, + "by_data": common.MapStr{ + "bytes": d.StorageTotals.Hdd.UsedByData, + }, + }, + }, + "max_bucket_count": d.MaxBucketCount, + "quota": common.MapStr{ + "index_memory": common.MapStr{ + "mb": d.IndexMemoryQuota, + }, + "memory": common.MapStr{ + "mb": d.MemoryQuota, + }, + }, + "ram": common.MapStr{ + "quota": common.MapStr{ + "total": common.MapStr{ + "value": common.MapStr{ + "bytes": d.StorageTotals.RAM.QuotaTotal, + }, + "per_node": common.MapStr{ + "bytes": d.StorageTotals.RAM.QuotaTotalPerNode, + }, + }, + "used": common.MapStr{ + "value": common.MapStr{ + "bytes": d.StorageTotals.RAM.QuotaUsed, + }, + "per_node": common.MapStr{ + "bytes": d.StorageTotals.RAM.QuotaUsedPerNode, + }, + }, + }, + "total": common.MapStr{ + "bytes": d.StorageTotals.RAM.Total, + }, + "used": common.MapStr{ + "value": common.MapStr{ + "bytes": d.StorageTotals.RAM.Used, + }, + "by_data": common.MapStr{ + "bytes": d.StorageTotals.RAM.UsedByData, + }, + }, + }, + } + + return event +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/testdata/sample_response.json b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/testdata/sample_response.json new file mode 100644 index 00000000..73519a9b --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/testdata/sample_response.json @@ -0,0 +1,166 @@ +{ + "storageTotals": { + "ram": { + "total": 8359174144, + "quotaTotal": 314572800, + "quotaUsed": 104857600, + "used": 8004751360, + "usedByData": 53962016, + "quotaUsedPerNode": 104857600, + "quotaTotalPerNode": 314572800 + }, + "hdd": { + "total": 63381999616, + "quotaTotal": 63381999616, + "used": 16479319900, + "usedByData": 16369010, + "free": 46902679716 + } + }, + "ftsMemoryQuota": 512, + "indexMemoryQuota": 300, + "memoryQuota": 300, + "name": "default", + "alerts": [], + "alertsSilenceURL": "/controller/resetAlerts?token=0&uuid=e3b69f016eec9e85dbf74d6778fdf916", + "nodes": [ + { + "systemStats": { + "cpu_utilization_rate": 29.64705882352941, + "swap_total": 4189057024, + "swap_used": 135168, + "mem_total": 8359174144, + "mem_free": 4678324224 + }, + "interestingStats": { + "cmd_get": 0, + "couch_docs_actual_disk_size": 13563791, + "couch_docs_data_size": 9792512, + "couch_spatial_data_size": 0, + "couch_spatial_disk_size": 0, + "couch_views_actual_disk_size": 2805219, + "couch_views_data_size": 2805219, + "curr_items": 7303, + "curr_items_tot": 7303, + "ep_bg_fetched": 0, + "get_hits": 0, + "mem_used": 53962016, + "ops": 0, + "vb_replica_curr_items": 0 + }, + "uptime": "7260", + "memoryTotal": 8359174144, + "memoryFree": 4678324224, + "mcdMemoryReserved": 6377, + "mcdMemoryAllocated": 6377, + "couchApiBase": "http://172.17.0.2:8092/", + "couchApiBaseHTTPS": "https://172.17.0.2:18092/", + "otpCookie": "xxvitrrrilnhfkyh", + "clusterMembership": "active", + "recoveryType": "none", + "status": "healthy", + "otpNode": "ns_1@127.0.0.1", + "thisNode": true, + "hostname": "172.17.0.2:8091", + "clusterCompatibility": 262149, + "version": "4.5.1-2844-enterprise", + "os": "x86_64-unknown-linux-gnu", + "ports": { + "sslProxy": 11214, + "httpsMgmt": 18091, + "httpsCAPI": 18092, + "proxy": 11211, + "direct": 11210 + }, + "services": [ + "index", + "kv", + "n1ql" + ] + } + ], + "buckets": { + "uri": "/pools/default/buckets?v=42234267&uuid=e3b69f016eec9e85dbf74d6778fdf916", + "terseBucketsBase": "/pools/default/b/", + "terseStreamingBucketsBase": "/pools/default/bs/" + }, + "remoteClusters": { + "uri": 
"/pools/default/remoteClusters?uuid=e3b69f016eec9e85dbf74d6778fdf916", + "validateURI": "/pools/default/remoteClusters?just_validate=1" + }, + "controllers": { + "addNode": { + "uri": "/controller/addNodeV2?uuid=e3b69f016eec9e85dbf74d6778fdf916" + }, + "rebalance": { + "uri": "/controller/rebalance?uuid=e3b69f016eec9e85dbf74d6778fdf916" + }, + "failOver": { + "uri": "/controller/failOver?uuid=e3b69f016eec9e85dbf74d6778fdf916" + }, + "startGracefulFailover": { + "uri": "/controller/startGracefulFailover?uuid=e3b69f016eec9e85dbf74d6778fdf916" + }, + "reAddNode": { + "uri": "/controller/reAddNode?uuid=e3b69f016eec9e85dbf74d6778fdf916" + }, + "reFailOver": { + "uri": "/controller/reFailOver?uuid=e3b69f016eec9e85dbf74d6778fdf916" + }, + "ejectNode": { + "uri": "/controller/ejectNode?uuid=e3b69f016eec9e85dbf74d6778fdf916" + }, + "setRecoveryType": { + "uri": "/controller/setRecoveryType?uuid=e3b69f016eec9e85dbf74d6778fdf916" + }, + "setAutoCompaction": { + "uri": "/controller/setAutoCompaction?uuid=e3b69f016eec9e85dbf74d6778fdf916", + "validateURI": "/controller/setAutoCompaction?just_validate=1" + }, + "clusterLogsCollection": { + "startURI": "/controller/startLogsCollection?uuid=e3b69f016eec9e85dbf74d6778fdf916", + "cancelURI": "/controller/cancelLogsCollection?uuid=e3b69f016eec9e85dbf74d6778fdf916" + }, + "replication": { + "createURI": "/controller/createReplication?uuid=e3b69f016eec9e85dbf74d6778fdf916", + "validateURI": "/controller/createReplication?just_validate=1" + } + }, + "rebalanceStatus": "none", + "rebalanceProgressUri": "/pools/default/rebalanceProgress", + "stopRebalanceUri": "/controller/stopRebalance?uuid=e3b69f016eec9e85dbf74d6778fdf916", + "nodeStatusesUri": "/nodeStatuses", + "maxBucketCount": 10, + "autoCompactionSettings": { + "parallelDBAndViewCompaction": false, + "databaseFragmentationThreshold": { + "percentage": 30, + "size": "undefined" + }, + "viewFragmentationThreshold": { + "percentage": 30, + "size": "undefined" + }, + "indexCompactionMode": "circular", + "indexCircularCompaction": { + "daysOfWeek": "Sunday,Monday,Tuesday,Wednesday,Thursday,Friday,Saturday", + "interval": { + "fromHour": 0, + "toHour": 0, + "fromMinute": 0, + "toMinute": 0, + "abortOutside": false + } + }, + "indexFragmentationThreshold": { + "percentage": 30 + } + }, + "tasks": { + "uri": "/pools/default/tasks?v=74113599" + }, + "counters": {}, + "indexStatusURI": "/indexStatus?v=21137658", + "checkPermissionsURI": "/pools/default/checkPermissions?v=11181330", + "serverGroupsUri": "/pools/default/serverGroups?v=52184775" +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/doc.go b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/doc.go new file mode 100644 index 00000000..c7150227 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/doc.go @@ -0,0 +1,4 @@ +/* +Package couchbase is a Metricbeat module that contains MetricSets. 
+*/ +package couchbase diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/_meta/data.json new file mode 100644 index 00000000..0511a104 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/_meta/data.json @@ -0,0 +1,87 @@ +{ + "@timestamp": "2016-05-23T08:05:34.853Z", + "beat": { + "hostname": "host.example.com", + "name": "host.example.com" + }, + "couchbase": { + "node": { + "cmd_get": 0, + "couch": { + "docs": { + "actual_disk_size": { + "bytes": 13575997 + }, + "data_size": { + "bytes": 9804800 + } + }, + "spacial": { + "data_size": { + "bytes": 0 + }, + "disk_size": { + "bytes": 0 + } + }, + "views": { + "actual_disk_size": { + "bytes": 2792998 + }, + "data_size": { + "bytes": 2792998 + } + } + }, + "cpu_utilization_rate": { + "pct": 87.08791 + }, + "current_items": { + "total": 7303, + "value": 7303 + }, + "ep_bg_fetched": 0, + "get_hits": 0, + "hostname": "172.22.0.9:8091", + "mcd_memory": { + "allocated": { + "bytes": 6378 + }, + "reserved": { + "bytes": 6378 + } + }, + "memory": { + "free": { + "bytes": 2273726464 + }, + "total": { + "bytes": 8360542208 + }, + "used": { + "bytes": 53962160 + } + }, + "ops": 0, + "swap": { + "total": { + "bytes": 4189057024 + }, + "used": { + "bytes": 58044416 + } + }, + "uptime": { + "sec": 50492 + }, + "vb_replica_curr_items": 0 + } + }, + "metricset": { + "host": "couchbase:8091", + "module": "couchbase", + "name": "node", + "rtt": 115 + }, + "type": "metricsets" +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/_meta/docs.asciidoc new file mode 100644 index 00000000..e9e5970a --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/_meta/docs.asciidoc @@ -0,0 +1,5 @@ +=== Couchbase Node Metricset + +The Couchbase `node` metricset collects data from Couchbase pools Rest API +`http://couchbasehost:8091/pools/default` and creates an event for + each node in the cluster. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/_meta/fields.yml new file mode 100644 index 00000000..a96545c8 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/_meta/fields.yml @@ -0,0 +1,100 @@ +- name: node + type: group + description: > + Couchbase node metrics. + fields: + - name: cmd_get + type: long + description: > + Number of get commands + - name: couch.docs.actual_disk_size.bytes + format: bytes + type: long + description: > + Amount of disk space used by Couch docs (bytes). + - name: couch.docs.data_size.bytes + format: bytes + type: long + description: > + Data size of Couch docs associated with a node (bytes). + - name: couch.spatial.data_size.bytes + type: long + description: > + Size of object data for spatial views (bytes). + - name: couch.spatial.disk_size.bytes + type: long + description: > + Amount of disk space used by spatial views (bytes). + - name: couch.views.actual_disk_size.bytes + type: long + description: > + Amount of disk space used by Couch views (bytes). + - name: couch.views.data_size.bytes + type: long + description: > + Size of object data for Couch views (bytes). + - name: cpu_utilization_rate.pct + type: scaled_float + description: > + The CPU utilization rate (%). 
+ - name: current_items.value + type: long + description: > + Number of current items. + - name: current_items.total + type: long + description: > + Total number of items associated with the node. + - name: ep_bg_fetched + type: long + description: > + Number of disk fetches performed since the server was started. + - name: get_hits + type: long + description: > + Number of get hits. + - name: hostname + type: keyword + description: > + The hostname of the node. + - name: mcd_memory.allocated.bytes + format: bytes + type: long + description: > + Amount of memcached memory allocated (bytes). + - name: mcd_memory.reserved.bytes + type: long + description: > + Amount of memcached memory reserved (bytes). + - name: memory.free.bytes + type: long + description: > + Amount of memory free for the node (bytes). + - name: memory.total.bytes + type: long + description: > + Total memory available to the node (bytes). + - name: memory.used.bytes + type: long + description: > + Memory used by the node (bytes). + - name: ops + type: long + description: > + Number of operations performed on Couchbase. + - name: swap.total.bytes + type: long + description: > + Total swap size allocated (bytes). + - name: swap.used.bytes + type: long + description: > + Amount of swap space used (bytes). + - name: uptime.sec + type: long + description: > + Time during which the node was in operation (sec). + - name: vb_replica_curr_items + type: long + description: > + Number of items/documents that are replicas. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/data.go b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/data.go new file mode 100644 index 00000000..bd27bbb4 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/data.go @@ -0,0 +1,148 @@ +package node + +import ( + "encoding/json" + "io" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" + "strconv" +) + +type NodeSystemStats struct { + CPUUtilizationRate float64 `json:"cpu_utilization_rate"` + SwapTotal int64 `json:"swap_total"` + SwapUsed int64 `json:"swap_used"` + MemTotal int64 `json:"mem_total"` + MemFree int64 `json:"mem_free"` +} + +type NodeInterestingStats struct { + CmdGet int64 `json:"cmd_get"` + CouchDocsActualDiskSize int64 `json:"couch_docs_actual_disk_size"` + CouchDocsDataSize int64 `json:"couch_docs_data_size"` + CouchSpatialDataSize int64 `json:"couch_spatial_data_size"` + CouchSpatialDiskSize int64 `json:"couch_spatial_disk_size"` + CouchViewsActualDiskSize int64 `json:"couch_views_actual_disk_size"` + CouchViewsDataSize int64 `json:"couch_views_data_size"` + CurrItems int64 `json:"curr_items"` + CurrItemsTot int64 `json:"curr_items_tot"` + EpBgFetched int64 `json:"ep_bg_fetched"` + GetHits int64 `json:"get_hits"` + MemUsed int64 `json:"mem_used"` + Ops int64 `json:"ops"` + VbReplicaCurrItems int64 `json:"vb_replica_curr_items"` +} + +type Node struct { + SystemStats NodeSystemStats `json:"systemStats"` + InterestingStats NodeInterestingStats `json:"interestingStats"` + Uptime string `json:"uptime"` + MemoryTotal int64 `json:"memoryTotal"` + MemoryFree int64 `json:"memoryFree"` + McdMemoryReserved int64 `json:"mcdMemoryReserved"` + McdMemoryAllocated int64 `json:"mcdMemoryAllocated"` + ClusterMembership string `json:"clusterMembership"` + RecoveryType string `json:"recoveryType"` + Status string `json:"status"` + ThisNode bool `json:"thisNode"` + Hostname string `json:"hostname"` + ClusterCompatibility int64 
`json:"clusterCompatibility"` + Version string `json:"version"` + Os string `json:"os"` +} + +type Data struct { + Nodes []Node `json:"nodes"` +} + +func eventsMapping(body io.Reader) []common.MapStr { + + var d Data + err := json.NewDecoder(body).Decode(&d) + if err != nil { + logp.Err("Error: ", err) + } + + events := []common.MapStr{} + + for _, NodeItem := range d.Nodes { + uptime, _ := strconv.ParseInt(NodeItem.Uptime, 10, 64) + + event := common.MapStr{ + "cmd_get": NodeItem.InterestingStats.CmdGet, + "couch": common.MapStr{ + "docs": common.MapStr{ + "actual_disk_size": common.MapStr{ + "bytes": NodeItem.InterestingStats.CouchDocsActualDiskSize, + }, + "data_size": common.MapStr{ + "bytes": NodeItem.InterestingStats.CouchDocsDataSize, + }, + }, + "spacial": common.MapStr{ + "data_size": common.MapStr{ + "bytes": NodeItem.InterestingStats.CouchSpatialDataSize, + }, + "disk_size": common.MapStr{ + "bytes": NodeItem.InterestingStats.CouchSpatialDiskSize, + }, + }, + "views": common.MapStr{ + "actual_disk_size": common.MapStr{ + "bytes": NodeItem.InterestingStats.CouchViewsActualDiskSize, + }, + "data_size": common.MapStr{ + "bytes": NodeItem.InterestingStats.CouchViewsDataSize, + }, + }, + }, + "cpu_utilization_rate": common.MapStr{ + "pct": NodeItem.SystemStats.CPUUtilizationRate, + }, + "current_items": common.MapStr{ + "value": NodeItem.InterestingStats.CurrItems, + "total": NodeItem.InterestingStats.CurrItemsTot, + }, + "ep_bg_fetched": NodeItem.InterestingStats.EpBgFetched, + "get_hits": NodeItem.InterestingStats.GetHits, + "hostname": NodeItem.Hostname, + "mcd_memory": common.MapStr{ + "reserved": common.MapStr{ + "bytes": NodeItem.McdMemoryReserved, + }, + "allocated": common.MapStr{ + "bytes": NodeItem.McdMemoryAllocated, + }, + }, + "memory": common.MapStr{ + "total": common.MapStr{ + "bytes": NodeItem.SystemStats.MemTotal, + }, + "free": common.MapStr{ + "bytes": NodeItem.SystemStats.MemFree, + }, + "used": common.MapStr{ + "bytes": NodeItem.InterestingStats.MemUsed, + }, + }, + "ops": NodeItem.InterestingStats.Ops, + "swap": common.MapStr{ + "total": common.MapStr{ + "bytes": NodeItem.SystemStats.SwapTotal, + }, + "used": common.MapStr{ + "bytes": NodeItem.SystemStats.SwapUsed, + }, + }, + "uptime": common.MapStr{ + "sec": uptime, + }, + "vb_replica_curr_items": NodeItem.InterestingStats.VbReplicaCurrItems, + } + events = append(events, event) + } + + return events + +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/node.go b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/node.go new file mode 100644 index 00000000..b88d2ba0 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/node.go @@ -0,0 +1,82 @@ +package node + +import ( + "fmt" + "net/http" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/metricbeat/mb/parse" +) + +const ( + defaultScheme = "http" + defaultPath = "/pools/default" +) + +var ( + hostParser = parse.URLHostParserBuilder{ + DefaultScheme: defaultScheme, + DefaultPath: defaultPath, + }.Build() +) + +// init registers the MetricSet with the central registry. 
+// The New method will be called after the setup of the module and before starting to fetch data +func init() { + if err := mb.Registry.AddMetricSet("couchbase", "node", New, hostParser); err != nil { + panic(err) + } +} + +// MetricSet type defines all fields of the MetricSet +// As a minimum it must inherit the mb.BaseMetricSet fields, but can be extended with +// additional entries. These variables can be used to persist data or configuration between +// multiple fetch calls. +type MetricSet struct { + mb.BaseMetricSet + client *http.Client +} + +// New create a new instance of the MetricSet +// Part of new is also setting up the configuration by processing additional +// configuration entries if needed. +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + logp.Warn("EXPERIMENTAL: The couchbase node metricset is experimental") + + config := struct{}{} + + if err := base.Module().UnpackConfig(&config); err != nil { + return nil, err + } + + return &MetricSet{ + BaseMetricSet: base, + client: &http.Client{Timeout: base.Module().Config().Timeout}, + }, nil +} + +// Fetch methods implements the data gathering and data conversion to the right format +// It returns the event which is then forward to the output. In case of an error, a +// descriptive error must be returned. +func (m *MetricSet) Fetch() ([]common.MapStr, error) { + req, err := http.NewRequest("GET", m.HostData().SanitizedURI, nil) + if m.HostData().User != "" || m.HostData().Password != "" { + req.SetBasicAuth(m.HostData().User, m.HostData().Password) + } + + resp, err := m.client.Do(req) + + if err != nil { + return nil, fmt.Errorf("error making http request: %v", err) + } + + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Error Connecting to Couchbase %d: %s", resp.StatusCode, resp.Status) + } + + return eventsMapping(resp.Body), nil +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/node_integration_test.go b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/node_integration_test.go new file mode 100644 index 00000000..0acfa5c2 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/node_integration_test.go @@ -0,0 +1,27 @@ +// +build integration + +package node + +import ( + "testing" + + mbtest "github.com/elastic/beats/metricbeat/mb/testing" + "github.com/elastic/beats/metricbeat/module/couchbase" +) + +func TestData(t *testing.T) { + f := mbtest.NewEventsFetcher(t, getConfig()) + + err := mbtest.WriteEvents(f, t) + if err != nil { + t.Fatal("write", err) + } +} + +func getConfig() map[string]interface{} { + return map[string]interface{}{ + "module": "couchbase", + "metricsets": []string{"node"}, + "hosts": []string{couchbase.GetEnvDSN()}, + } +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/node_test.go b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/node_test.go new file mode 100644 index 00000000..f49b71f0 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/node_test.go @@ -0,0 +1,110 @@ +// +build !integration + +package node + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "path/filepath" + "testing" + + "github.com/elastic/beats/libbeat/common" + mbtest "github.com/elastic/beats/metricbeat/mb/testing" + + "github.com/stretchr/testify/assert" +) + +func TestFetchEventContents(t *testing.T) { + absPath, err := filepath.Abs("./testdata/") + // response is a raw response from a couchbase + response, err := 
ioutil.ReadFile(absPath + "/sample_response.json") + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + w.Write([]byte(response)) + })) + defer server.Close() + + config := map[string]interface{}{ + "module": "couchbase", + "metricsets": []string{"node"}, + "hosts": []string{server.URL}, + } + + f := mbtest.NewEventsFetcher(t, config) + events, err := f.Fetch() + event := events[0] + if !assert.NoError(t, err) { + t.FailNow() + } + + t.Logf("%s/%s event: %+v", f.Module().Name(), f.Name(), event.StringToPrint()) + + assert.EqualValues(t, 0, event["cmd_get"]) + + couch := event["couch"].(common.MapStr) + + couch_docs := couch["docs"].(common.MapStr) + couch_docs_actual_disk_size := couch_docs["actual_disk_size"].(common.MapStr) + assert.EqualValues(t, 13563791, couch_docs_actual_disk_size["bytes"]) + + couch_docs_data_size := couch_docs["data_size"].(common.MapStr) + assert.EqualValues(t, 9792512, couch_docs_data_size["bytes"]) + + couch_spacial := couch["spacial"].(common.MapStr) + couch_spacial_data_size := couch_spacial["data_size"].(common.MapStr) + assert.EqualValues(t, 0, couch_spacial_data_size["bytes"]) + + couch_spacial_disk_size := couch_spacial["disk_size"].(common.MapStr) + assert.EqualValues(t, 0, couch_spacial_disk_size["bytes"]) + + couch_views := couch["views"].(common.MapStr) + couch_views_actual_disk_size := couch_views["actual_disk_size"].(common.MapStr) + assert.EqualValues(t, 2805219, couch_views_actual_disk_size["bytes"]) + + couch_views_data_size := couch_views["data_size"].(common.MapStr) + assert.EqualValues(t, 2805219, couch_views_data_size["bytes"]) + + event_cpu_utilization_rate := event["cpu_utilization_rate"].(common.MapStr) + assert.EqualValues(t, 29.64705882352941, event_cpu_utilization_rate["pct"]) + + current_items := event["current_items"].(common.MapStr) + assert.EqualValues(t, 7303, current_items["value"]) + assert.EqualValues(t, 7303, current_items["total"]) + + assert.EqualValues(t, 0, event["ep_bg_fetched"]) + assert.EqualValues(t, 0, event["get_hits"]) + assert.Equal(t, "172.17.0.2:8091", event["hostname"]) + + mcd_memory := event["mcd_memory"].(common.MapStr) + mcd_memory_reserved := mcd_memory["reserved"].(common.MapStr) + assert.EqualValues(t, 6377, mcd_memory_reserved["bytes"]) + + mcd_memory_allocated := mcd_memory["allocated"].(common.MapStr) + assert.EqualValues(t, 6377, mcd_memory_allocated["bytes"]) + + memory := event["memory"].(common.MapStr) + memory_total := memory["total"].(common.MapStr) + assert.EqualValues(t, 8359174144, memory_total["bytes"]) + + memory_free := memory["free"].(common.MapStr) + assert.EqualValues(t, 4678324224, memory_free["bytes"]) + + memory_used := memory["used"].(common.MapStr) + assert.EqualValues(t, 53962016, memory_used["bytes"]) + + assert.EqualValues(t, 0, event["ops"]) + + swap := event["swap"].(common.MapStr) + swap_total := swap["total"].(common.MapStr) + assert.EqualValues(t, 4189057024, swap_total["bytes"]) + + swap_used := swap["used"].(common.MapStr) + assert.EqualValues(t, 135168, swap_used["bytes"]) + + event_uptime := event["uptime"].(common.MapStr) + assert.EqualValues(t, 7260, event_uptime["sec"]) + + assert.EqualValues(t, 0, event["vb_replica_curr_items"]) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/testdata/sample_response.json b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/testdata/sample_response.json new file mode 100644
index 00000000..73519a9b --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/testdata/sample_response.json @@ -0,0 +1,166 @@ +{ + "storageTotals": { + "ram": { + "total": 8359174144, + "quotaTotal": 314572800, + "quotaUsed": 104857600, + "used": 8004751360, + "usedByData": 53962016, + "quotaUsedPerNode": 104857600, + "quotaTotalPerNode": 314572800 + }, + "hdd": { + "total": 63381999616, + "quotaTotal": 63381999616, + "used": 16479319900, + "usedByData": 16369010, + "free": 46902679716 + } + }, + "ftsMemoryQuota": 512, + "indexMemoryQuota": 300, + "memoryQuota": 300, + "name": "default", + "alerts": [], + "alertsSilenceURL": "/controller/resetAlerts?token=0&uuid=e3b69f016eec9e85dbf74d6778fdf916", + "nodes": [ + { + "systemStats": { + "cpu_utilization_rate": 29.64705882352941, + "swap_total": 4189057024, + "swap_used": 135168, + "mem_total": 8359174144, + "mem_free": 4678324224 + }, + "interestingStats": { + "cmd_get": 0, + "couch_docs_actual_disk_size": 13563791, + "couch_docs_data_size": 9792512, + "couch_spatial_data_size": 0, + "couch_spatial_disk_size": 0, + "couch_views_actual_disk_size": 2805219, + "couch_views_data_size": 2805219, + "curr_items": 7303, + "curr_items_tot": 7303, + "ep_bg_fetched": 0, + "get_hits": 0, + "mem_used": 53962016, + "ops": 0, + "vb_replica_curr_items": 0 + }, + "uptime": "7260", + "memoryTotal": 8359174144, + "memoryFree": 4678324224, + "mcdMemoryReserved": 6377, + "mcdMemoryAllocated": 6377, + "couchApiBase": "http://172.17.0.2:8092/", + "couchApiBaseHTTPS": "https://172.17.0.2:18092/", + "otpCookie": "xxvitrrrilnhfkyh", + "clusterMembership": "active", + "recoveryType": "none", + "status": "healthy", + "otpNode": "ns_1@127.0.0.1", + "thisNode": true, + "hostname": "172.17.0.2:8091", + "clusterCompatibility": 262149, + "version": "4.5.1-2844-enterprise", + "os": "x86_64-unknown-linux-gnu", + "ports": { + "sslProxy": 11214, + "httpsMgmt": 18091, + "httpsCAPI": 18092, + "proxy": 11211, + "direct": 11210 + }, + "services": [ + "index", + "kv", + "n1ql" + ] + } + ], + "buckets": { + "uri": "/pools/default/buckets?v=42234267&uuid=e3b69f016eec9e85dbf74d6778fdf916", + "terseBucketsBase": "/pools/default/b/", + "terseStreamingBucketsBase": "/pools/default/bs/" + }, + "remoteClusters": { + "uri": "/pools/default/remoteClusters?uuid=e3b69f016eec9e85dbf74d6778fdf916", + "validateURI": "/pools/default/remoteClusters?just_validate=1" + }, + "controllers": { + "addNode": { + "uri": "/controller/addNodeV2?uuid=e3b69f016eec9e85dbf74d6778fdf916" + }, + "rebalance": { + "uri": "/controller/rebalance?uuid=e3b69f016eec9e85dbf74d6778fdf916" + }, + "failOver": { + "uri": "/controller/failOver?uuid=e3b69f016eec9e85dbf74d6778fdf916" + }, + "startGracefulFailover": { + "uri": "/controller/startGracefulFailover?uuid=e3b69f016eec9e85dbf74d6778fdf916" + }, + "reAddNode": { + "uri": "/controller/reAddNode?uuid=e3b69f016eec9e85dbf74d6778fdf916" + }, + "reFailOver": { + "uri": "/controller/reFailOver?uuid=e3b69f016eec9e85dbf74d6778fdf916" + }, + "ejectNode": { + "uri": "/controller/ejectNode?uuid=e3b69f016eec9e85dbf74d6778fdf916" + }, + "setRecoveryType": { + "uri": "/controller/setRecoveryType?uuid=e3b69f016eec9e85dbf74d6778fdf916" + }, + "setAutoCompaction": { + "uri": "/controller/setAutoCompaction?uuid=e3b69f016eec9e85dbf74d6778fdf916", + "validateURI": "/controller/setAutoCompaction?just_validate=1" + }, + "clusterLogsCollection": { + "startURI": "/controller/startLogsCollection?uuid=e3b69f016eec9e85dbf74d6778fdf916", + "cancelURI": 
"/controller/cancelLogsCollection?uuid=e3b69f016eec9e85dbf74d6778fdf916" + }, + "replication": { + "createURI": "/controller/createReplication?uuid=e3b69f016eec9e85dbf74d6778fdf916", + "validateURI": "/controller/createReplication?just_validate=1" + } + }, + "rebalanceStatus": "none", + "rebalanceProgressUri": "/pools/default/rebalanceProgress", + "stopRebalanceUri": "/controller/stopRebalance?uuid=e3b69f016eec9e85dbf74d6778fdf916", + "nodeStatusesUri": "/nodeStatuses", + "maxBucketCount": 10, + "autoCompactionSettings": { + "parallelDBAndViewCompaction": false, + "databaseFragmentationThreshold": { + "percentage": 30, + "size": "undefined" + }, + "viewFragmentationThreshold": { + "percentage": 30, + "size": "undefined" + }, + "indexCompactionMode": "circular", + "indexCircularCompaction": { + "daysOfWeek": "Sunday,Monday,Tuesday,Wednesday,Thursday,Friday,Saturday", + "interval": { + "fromHour": 0, + "toHour": 0, + "fromMinute": 0, + "toMinute": 0, + "abortOutside": false + } + }, + "indexFragmentationThreshold": { + "percentage": 30 + } + }, + "tasks": { + "uri": "/pools/default/tasks?v=74113599" + }, + "counters": {}, + "indexStatusURI": "/indexStatus?v=21137658", + "checkPermissionsURI": "/pools/default/checkPermissions?v=11181330", + "serverGroupsUri": "/pools/default/serverGroups?v=52184775" +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/testing.go b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/testing.go new file mode 100644 index 00000000..f91981e6 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/testing.go @@ -0,0 +1,7 @@ +package couchbase + +import "os" + +func GetEnvDSN() string { + return os.Getenv("COUCHBASE_DSN") +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu.go index b945c039..333ff88d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu.go @@ -44,7 +44,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Fetch returns a list of docker CPU stats. func (m *MetricSet) Fetch() ([]common.MapStr, error) { - stats, err := docker.FetchStats(m.dockerClient) + stats, err := docker.FetchStats(m.dockerClient, m.Module().Config().Timeout) if err != nil { return nil, err } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/diskio.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/diskio.go index a0e36b5d..81b9c86e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/diskio.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/diskio.go @@ -46,7 +46,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Fetch creates list of events with diskio stats for all containers. 
func (m *MetricSet) Fetch() ([]common.MapStr, error) { - stats, err := docker.FetchStats(m.dockerClient) + stats, err := docker.FetchStats(m.dockerClient, m.Module().Config().Timeout) if err != nil { return nil, err } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go index 67437eef..9b2b5a1c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go @@ -7,6 +7,8 @@ import ( "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/mb/parse" + "time" + "github.com/fsouza/go-dockerclient" ) @@ -58,7 +60,7 @@ func NewDockerClient(endpoint string, config Config) (*docker.Client, error) { } // FetchStats returns a list of running containers with all related stats inside -func FetchStats(client *docker.Client) ([]Stat, error) { +func FetchStats(client *docker.Client, timeout time.Duration) ([]Stat, error) { containers, err := client.ListContainers(docker.ListContainersOptions{}) if err != nil { return nil, err @@ -66,24 +68,29 @@ func FetchStats(client *docker.Client) ([]Stat, error) { var wg sync.WaitGroup - containersList := []Stat{} - queue := make(chan Stat, 1) + containersList := make([]Stat, 0, len(containers)) + statsQueue := make(chan Stat, 1) wg.Add(len(containers)) for _, container := range containers { go func(container docker.APIContainers) { - queue <- exportContainerStats(client, &container) + defer wg.Done() + statsQueue <- exportContainerStats(client, &container, timeout) }(container) } go func() { - for container := range queue { - containersList = append(containersList, container) - wg.Done() - } + wg.Wait() + close(statsQueue) }() - wg.Wait() + // This will break after the queue has been drained and queue is closed. + for stat := range statsQueue { + // If names is empty, there is not data inside + if len(stat.Container.Names) != 0 { + containersList = append(containersList, stat) + } + } return containersList, err } @@ -93,7 +100,7 @@ func FetchStats(client *docker.Client) ([]Stat, error) { // This is currently very inefficient as docker calculates the average for each request, // means each request will take at least 2s: https://github.com/docker/docker/blob/master/cli/command/container/stats_helpers.go#L148 // Getting all stats at once is implemented here: https://github.com/docker/docker/pull/25361 -func exportContainerStats(client *docker.Client, container *docker.APIContainers) Stat { +func exportContainerStats(client *docker.Client, container *docker.APIContainers, timeout time.Duration) Stat { var wg sync.WaitGroup var event Stat @@ -103,7 +110,7 @@ func exportContainerStats(client *docker.Client, container *docker.APIContainers ID: container.ID, Stats: statsC, Stream: false, - Timeout: -1, + Timeout: timeout, } wg.Add(2) diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/memory.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/memory.go index 51182882..85a3f7b8 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/memory.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/memory.go @@ -44,7 +44,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Fetch creates a list of memory events for each container. 
func (m *MetricSet) Fetch() ([]common.MapStr, error) { - stats, err := docker.FetchStats(m.dockerClient) + stats, err := docker.FetchStats(m.dockerClient, m.Module().Config().Timeout) if err != nil { return nil, err } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/network.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/network.go index 8c8aa13d..2597d8ab 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/network.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/network.go @@ -46,7 +46,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Fetch methods creates a list of network events for each container. func (m *MetricSet) Fetch() ([]common.MapStr, error) { - stats, err := docker.FetchStats(m.dockerClient) + stats, err := docker.FetchStats(m.dockerClient, m.Module().Config().Timeout) if err != nil { return nil, err } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/docs.asciidoc index 780fd392..f6407ce4 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/docs.asciidoc @@ -1,12 +1,14 @@ == HAProxy Module -This is the haproxy Module. To enable stats collection from HAProxy, you must enable the stats socket via TCP. -For example, placing the following statement under the `global` or `default` section of the haproxy config: +This module collects stats from http://www.haproxy.org/[HAProxy]. To configure +HAProxy to collect stats, you must enable the stats socket via TCP. For example, +to enable stats reporting via any local IP on port 14567, place this statement +under the `global` or `default` section of the haproxy config: `stats socket 127.0.0.1:14567` -will enable stats reporting via any local IP on port 14567. Please note that you should probably use an internal private IP -or secure this with a firewall rule so that only designated hosts can access this data. +NOTE: You should use an internal private IP, or secure this with a firewall +rule, so that only designated hosts can access this data. [float] === Compatibility diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/fields.yml index 2cd41422..fe6b5616 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/fields.yml @@ -1,13 +1,13 @@ - key: haproxy - title: "haproxy" + title: "HAProxy" description: > experimental[] - haproxy Module + HAProxy Module short_config: false fields: - name: haproxy type: group description: > - HAProx metrics. + HAProxy metrics. fields: diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/info/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/info/_meta/docs.asciidoc index 1eb0c3c0..abb5442a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/info/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/info/_meta/docs.asciidoc @@ -1,3 +1,3 @@ -=== HAProxy info MetricSet +=== HAProxy Info Metricset -This is the info metricset of the haproxy module. +The HAProxy `info` metricset collects general information about HAProxy processes. 
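A minimal sketch of how this module could be enabled in `metricbeat.yml`, assuming the stats socket configured as shown above; the period and the `tcp://` host scheme are illustrative assumptions, not part of this change:

[source,yaml]
----
metricbeat.modules:
  # Poll the HAProxy stats socket enabled with "stats socket 127.0.0.1:14567"
  - module: haproxy
    metricsets: ["info", "stat"]
    period: 10s
    hosts: ["tcp://127.0.0.1:14567"]
----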
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/info/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/info/_meta/fields.yml index 6252c0cb..c8ff0ed3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/info/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/info/_meta/fields.yml @@ -1,22 +1,22 @@ - name: info type: group description: > - General infomration collected on HAProxy process + General information about HAProxy processes. fields: - name: processes type: long description: > - Number of processes + Number of processes. - name: process_num type: long description: > - Process number + Process number. - name: pid type: long description: > - Process ID + Process ID. - name: run_queue type: long @@ -29,18 +29,18 @@ - name: uptime.sec type: long description: > - Current uptime in seconds + Current uptime in seconds. - name: memory.max.bytes type: long format: bytes description: > - Max number of memory usage in bytes (The 'Memmax_MB' value converted to bytes) + Maximum amount of memory usage in bytes (the 'Memmax_MB' value converted to bytes). - name: ulimit_n type: long description: > - Max number of open files for process + Maximum number of open files for the process. - name: compress type: group @@ -89,32 +89,32 @@ - name: current type: long description: > - Current Connections + Current connections. - name: total type: long description: > - Total Connections + Total connections. - name: ssl.current type: long description: > - Current ssl Connections + Current SSL connections. - name: ssl.total type: long description: > - Current Connections + Total SSL connections. - name: ssl.max type: long description: > - Current Connections + Maximum SSL connections. - name: max type: long description: > - Max connections + Maximum connections. - name: hard_max type: long diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/stat/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/stat/_meta/docs.asciidoc index c3c90f78..63e90104 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/stat/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/stat/_meta/docs.asciidoc @@ -1,133 +1,5 @@ -=== HAProxy stat MetricSet +=== HAProxy Stat Metricset -This is the stat metricset of the haproxy module. +The HAProxy `stat` metricset collects 'stat' fields from HAProxy processes. - -[float] -=== Official documentation for fields - -A list of the 'stat' fields along with a basic description can be found here: - -http://www.haproxy.org/download/1.6/doc/management.txt - -The following documentation bellow is an extract from the URL above, more specifically from section "9.1. CSV format" - -[source,log] ----- -In brackets after each field name are the types which may have a value for -that field. The types are L (Listeners), F (Frontends), B (Backends), and -S (Servers). - - 0. pxname [LFBS]: proxy name - 1. svname [LFBS]: service name (FRONTEND for frontend, BACKEND for backend, - any name for server/listener) - 2. qcur [..BS]: current queued requests. For the backend this reports the - number queued without a server assigned. - 3. qmax [..BS]: max value of qcur - 4. scur [LFBS]: current sessions - 5. smax [LFBS]: max sessions - 6. slim [LFBS]: configured session limit - 7. stot [LFBS]: cumulative number of connections - 8. bin [LFBS]: bytes in - 9. bout [LFBS]: bytes out - 10. 
dreq [LFB.]: requests denied because of security concerns. - - For tcp this is because of a matched tcp-request content rule. - - For http this is because of a matched http-request or tarpit rule. - 11. dresp [LFBS]: responses denied because of security concerns. - - For http this is because of a matched http-request rule, or - "option checkcache". - 12. ereq [LF..]: request errors. Some of the possible causes are: - - early termination from the client, before the request has been sent. - - read error from the client - - client timeout - - client closed connection - - various bad requests from the client. - - request was tarpitted. - 13. econ [..BS]: number of requests that encountered an error trying to - connect to a backend server. The backend stat is the sum of the stat - for all servers of that backend, plus any connection errors not - associated with a particular server (such as the backend having no - active servers). - 14. eresp [..BS]: response errors. srv_abrt will be counted here also. - Some other errors are: - - write error on the client socket (won't be counted for the server stat) - - failure applying filters to the response. - 15. wretr [..BS]: number of times a connection to a server was retried. - 16. wredis [..BS]: number of times a request was redispatched to another - server. The server value counts the number of times that server was - switched away from. - 17. status [LFBS]: status (UP/DOWN/NOLB/MAINT/MAINT(via)...) - 18. weight [..BS]: total weight (backend), server weight (server) - 19. act [..BS]: number of active servers (backend), server is active (server) - 20. bck [..BS]: number of backup servers (backend), server is backup (server) - 21. chkfail [...S]: number of failed checks. (Only counts checks failed when - the server is up.) - 22. chkdown [..BS]: number of UP->DOWN transitions. The backend counter counts - transitions to the whole backend being down, rather than the sum of the - counters for each server. - 23. lastchg [..BS]: number of seconds since the last UP<->DOWN transition - 24. downtime [..BS]: total downtime (in seconds). The value for the backend - is the downtime for the whole backend, not the sum of the server downtime. - 25. qlimit [...S]: configured maxqueue for the server, or nothing in the - value is 0 (default, meaning no limit) - 26. pid [LFBS]: process id (0 for first instance, 1 for second, ...) - 27. iid [LFBS]: unique proxy id - 28. sid [L..S]: server id (unique inside a proxy) - 29. throttle [...S]: current throttle percentage for the server, when - slowstart is active, or no value if not in slowstart. - 30. lbtot [..BS]: total number of times a server was selected, either for new - sessions, or when re-dispatching. The server counter is the number - of times that server was selected. - 31. tracked [...S]: id of proxy/server if tracking is enabled. - 32. type [LFBS]: (0=frontend, 1=backend, 2=server, 3=socket/listener) - 33. rate [.FBS]: number of sessions per second over last elapsed second - 34. rate_lim [.F..]: configured limit on new sessions per second - 35. rate_max [.FBS]: max number of new sessions per second - 36. 
check_status [...S]: status of last health check, one of: - UNK -> unknown - INI -> initializing - SOCKERR -> socket error - L4OK -> check passed on layer 4, no upper layers testing enabled - L4TOUT -> layer 1-4 timeout - L4CON -> layer 1-4 connection problem, for example - "Connection refused" (tcp rst) or "No route to host" (icmp) - L6OK -> check passed on layer 6 - L6TOUT -> layer 6 (SSL) timeout - L6RSP -> layer 6 invalid response - protocol error - L7OK -> check passed on layer 7 - L7OKC -> check conditionally passed on layer 7, for example 404 with - disable-on-404 - L7TOUT -> layer 7 (HTTP/SMTP) timeout - L7RSP -> layer 7 invalid response - protocol error - L7STS -> layer 7 response error, for example HTTP 5xx - 37. check_code [...S]: layer5-7 code, if available - 38. check_duration [...S]: time in ms took to finish last health check - 39. hrsp_1xx [.FBS]: http responses with 1xx code - 40. hrsp_2xx [.FBS]: http responses with 2xx code - 41. hrsp_3xx [.FBS]: http responses with 3xx code - 42. hrsp_4xx [.FBS]: http responses with 4xx code - 43. hrsp_5xx [.FBS]: http responses with 5xx code - 44. hrsp_other [.FBS]: http responses with other codes (protocol error) - 45. hanafail [...S]: failed health checks details - 46. req_rate [.F..]: HTTP requests per second over last elapsed second - 47. req_rate_max [.F..]: max number of HTTP requests per second observed - 48. req_tot [.F..]: total number of HTTP requests received - 49. cli_abrt [..BS]: number of data transfers aborted by the client - 50. srv_abrt [..BS]: number of data transfers aborted by the server - (inc. in eresp) - 51. comp_in [.FB.]: number of HTTP response bytes fed to the compressor - 52. comp_out [.FB.]: number of HTTP response bytes emitted by the compressor - 53. comp_byp [.FB.]: number of bytes that bypassed the HTTP compressor - (CPU/BW limit) - 54. comp_rsp [.FB.]: number of HTTP responses that were compressed - 55. lastsess [..BS]: number of seconds since last session assigned to - server/backend - 56. last_chk [...S]: last health check contents or textual error - 57. last_agt [...S]: last agent check contents or textual error - 58. qtime [..BS]: the average queue time in ms over the 1024 last requests - 59. ctime [..BS]: the average connect time in ms over the 1024 last requests - 60. rtime [..BS]: the average response time in ms over the 1024 last requests - (0 for TCP) - 61. ttime [..BS]: the average total session time in ms over the 1024 last - requests ----- +See section "9.1. CSV format" of the http://www.haproxy.org/download/1.6/doc/management.txt[official HAProxy Management Guide] for a full list of 'stat' fields. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/stat/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/stat/_meta/fields.yml index 12beb8cb..98b90c3a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/stat/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/stat/_meta/fields.yml @@ -1,75 +1,75 @@ - name: stat type: group description: > - Stats collected from HAProxy process + Stats collected from HAProxy processes. fields: - name: status type: keyword description: > - status (UP/DOWN/NOLB/MAINT/MAINT(via)...) + Status (UP, DOWN, NOLB, MAINT, or MAINT(via)...). - name: weight type: long description: > - total weight (backend), server weight (server) + Total weight (for backends), or server weight (for servers). - name: downtime type: long description: > - total downtime (in seconds). 
The value for the backend - is the downtime for the whole backend, not the sum of the server downtime. + Total downtime (in seconds). For backends, this value is the downtime + for the whole backend, not the sum of the downtime for the servers. - name: component_type type: integer description: > - (0=frontend, 1=backend, 2=server, 3=socket/listener) + Component type (0=frontend, 1=backend, 2=server, or 3=socket/listener). - name: process_id type: integer description: > - process id (0 for first instance, 1 for second, ...) + Process ID (0 for first instance, 1 for second, and so on). - name: service_name type: keyword description: > - service name (FRONTEND for frontend, BACKEND for backend, any name for server/listener) + Service name (FRONTEND for frontend, BACKEND for backend, or any name for server/listener). - name: in.bytes type: long format: bytes description: > - bytes in + Bytes in. - name: out.bytes type: long format: bytes description: > - bytes out + Bytes out. - name: last_change type: integer description: > - number of seconds since the last UP<->DOWN transition + Number of seconds since the last UP->DOWN or DOWN->UP transition. - name: throttle.pct type: scaled_float format: percentage description: > - current throttle percentage for the server, when - slowstart is active, or no value if not in slowstart. + Current throttle percentage for the server when slowstart + is active, or no value if slowstart is inactive. - name: selected.total type: long description: > - total number of times a server was selected, either for new - sessions, or when re-dispatching. The server counter is the number - of times that server was selected. + Total number of times a server was selected, either for new + sessions, or when re-dispatching. For servers, this field reports the + the number of times the server was selected. - name: tracked.id type: long description: > - id of proxy/server if tracking is enabled. + ID of the proxy/server if tracking is enabled. - name: connection type: group @@ -78,17 +78,17 @@ - name: total type: long description: > - cumulative number of connections + Cumulative number of connections. - name: retried type: long description: > - number of times a connection to a server was retried. + Number of times a connection to a server was retried. - name: time.avg type: long description: > - the average connect time in ms over the 1024 last requests + Average connect time in ms over the last 1024 requests. - name: request type: group @@ -97,25 +97,28 @@ - name: denied type: long description: > - requests denied because of security concerns. - * For tcp this is because of a matched tcp-request content rule. - * For http this is because of a matched http-request or tarpit rule. + Requests denied because of security concerns. + + * For TCP this is because of a matched tcp-request content rule. + * For HTTP this is because of a matched http-request or tarpit rule. - name: queued.current type: long description: > - current queued requests. For the backend this reports the number queued without a server assigned + Current queued requests. For backends, this field reports the number + of requests queued without a server assigned. - name: queued.max type: long description: > - max value of queued.current + Maximum value of queued.current. - name: errors type: long description: > - request errors. Some of the possible causes are: - * early termination from the client, before the request has been sent. + Request errors. 
Some of the possible causes are: + + * early termination from the client, before the request has been sent * read error from the client * client timeout * client closed connection @@ -125,16 +128,16 @@ - name: redispatched type: long description: > - number of times a request was redispatched to another - server. The server value counts the number of times that server was + Number of times a request was redispatched to another server. For + servers, this field reports the number of times the server was switched away from. - name: connection.errors type: long description: > - number of requests that encountered an error trying to - connect to a backend server. The backend stat is the sum of the stat - for all servers of that backend, plus any connection errors not + Number of requests that encountered an error trying to + connect to a server. For backends, this field reports the sum of + the stat for all backend servers, plus any connection errors not associated with a particular server (such as the backend having no active servers). @@ -145,16 +148,16 @@ - name: value type: long description: > - HTTP requests per second over last elapsed second + Number of HTTP requests per second over the last elapsed second. - name: max type: long description: > - max number of HTTP requests per second observed + Maximum number of HTTP requests per second. - name: total type: long description: > - total number of HTTP requests received + Total number of HTTP requests received. - name: response @@ -164,22 +167,24 @@ - name: errors type: long description: > - response errors. srv_abrt will be counted here also. + Number of response errors. This value includes the number of data + transfers aborted by the server (haproxy.stat.server.aborted). Some other errors are: - * write error on the client socket (won't be counted for the server stat) - * failure applying filters to the respons + + * write errors on the client socket (won't be counted for the server stat) + * failure applying filters to the response - name: time.avg type: long description: > - the average response time in ms over the 1024 last requests (0 for TCP) + Average response time in ms over the last 1024 requests (0 for TCP). - name: denied type: integer description: > - responses denied because of security concerns. - * For http this is because of a matched http-request rule, or "option checkcache". + Responses denied because of security concerns. For HTTP this is + because of a matched http-request rule, or "option checkcache". - name: http type: group @@ -189,32 +194,32 @@ - name: 1xx type: long description: > - http responses with 1xx code + HTTP responses with 1xx code. - name: 2xx type: long description: > - http responses with 2xx code + HTTP responses with 2xx code. - name: 3xx type: long description: > - http responses with 3xx code + HTTP responses with 3xx code. - name: 4xx type: long description: > - http responses with 4xx code + HTTP responses with 4xx code. - name: 5xx type: long description: > - http responses with 5xx code + HTTP responses with 5xx code. - name: other type: long description: > - http responses with other codes (protocol error) + HTTP responses with other codes (protocol error). - name: session @@ -224,17 +229,17 @@ - name: current type: long description: > - current sessions + Number of current sessions. - name: max type: long description: > - max sessions + Maximum number of sessions. - name: limit type: long description: > - configured session limit + Configured session limit. 
- name: rate type: group @@ -242,17 +247,17 @@ - name: value type: integer description: > - number of sessions per second over last elapsed second + Number of sessions per second over the last elapsed second. - name: limit type: integer description: > - configured limit on new sessions per second + Configured limit on new sessions per second. - name: max type: integer description: > - max number of new sessions per second + Maximum number of new sessions per second. - name: check @@ -263,7 +268,8 @@ - name: status type: keyword description: > - status of last health check, one of: + Status of the last health check. One of: + UNK -> unknown INI -> initializing SOCKERR -> socket error @@ -284,12 +290,12 @@ - name: code type: long description: > - layer5-7 code, if available + Layer 5-7 code, if available. - name: duration type: long description: > - time in ms took to finish last health check + Time in ms that it took to finish the last health check. - name: health.last type: long @@ -306,20 +312,19 @@ - name: failed type: long description: > - number of failed checks. (Only counts checks failed when - the server is up.) + Number of checks that failed while the server was up. - name: down type: long description: > - number of UP->DOWN transitions. The backend counter counts - transitions to the whole backend being down, rather than the sum of the - counters for each server. + Number of UP->DOWN transitions. For backends, this value is the + number of transitions to the whole backend being down, rather than + the sum of the transitions for each server. - name: client.aborted type: integer description: > - number of data transfers aborted by the client + Number of data transfers aborted by the client. - name: server @@ -330,22 +335,24 @@ - name: id type: integer description: > - server id (unique inside a proxy) + Server ID (unique inside a proxy). - name: aborted type: integer description: > - number of data transfers aborted by the server (inc. in eresp) + Number of data transfers aborted by the server. This value is + included in haproxy.stat.response.errors. - name: active type: integer description: > - number of active servers (backend), server is active (server) + Number of backend servers that are active, meaning that they are + healthy and can receive requests from the load balancer. - name: backup type: integer description: > - number of backup servers (backend), server is backup (server) + Number of backend servers that are backup servers. - name: compressor @@ -357,25 +364,25 @@ type: long format: bytes description: > - number of HTTP response bytes fed to the compressor + Number of HTTP response bytes fed to the compressor. - name: out.bytes type: integer format: bytes description: > - number of HTTP response bytes emitted by the compressor + Number of HTTP response bytes emitted by the compressor. - name: bypassed.bytes type: long format: bytes description: > - number of bytes that bypassed the HTTP compressor (CPU/BW limit) + Number of bytes that bypassed the HTTP compressor (CPU/BW limit). - name: response.bytes type: long format: bytes description: > - number of HTTP responses that were compressed + Number of HTTP responses that were compressed. - name: proxy type: group @@ -385,12 +392,12 @@ - name: id type: integer description: > - unique proxy id + Unique proxy ID. - name: name type: keyword description: > - proxy name + Proxy name. 
- name: queue @@ -401,13 +408,13 @@ - name: limit type: integer description: > - configured maxqueue for the server, or nothing in the - value is 0 (default, meaning no limit) + Configured queue limit (maxqueue) for the server, or nothing if the + value of maxqueue is 0 (meaning no limit). - name: time.avg type: integer description: > - the average queue time in ms over the 1024 last requests + The average queue time in ms over the last 1024 requests. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/docs.asciidoc index 13b1f1e6..b5127454 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/docs.asciidoc @@ -1,4 +1,6 @@ == kafka Module +experimental[] + This is the kafka Module. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/broker.go b/vendor/github.com/elastic/beats/metricbeat/module/kafka/broker.go new file mode 100644 index 00000000..4f918fef --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/broker.go @@ -0,0 +1,471 @@ +package kafka + +import ( + "bytes" + "crypto/tls" + "fmt" + "io" + "net" + "os" + "strings" + "time" + + "github.com/Shopify/sarama" + "github.com/elastic/beats/libbeat/common" +) + +// Broker provides functionality for communicating with a single kafka broker +type Broker struct { + broker *sarama.Broker + cfg *sarama.Config + + advertisedAddr string + id int32 + matchID bool +} + +// BrokerSettings defines common configurations used when connecting to a broker +type BrokerSettings struct { + MatchID bool + DialTimeout, ReadTimeout time.Duration + ClientID string + Retries int + Backoff time.Duration + TLS *tls.Config + Username, Password string + Version Version +} + +type GroupDescription struct { + Members map[string]MemberDescription +} + +type MemberDescription struct { + Err error + ClientID string + ClientHost string + Topics map[string][]int32 +} + +const noID = -1 + +// NewBroker creates a new unconnected kafka Broker connection instance. +func NewBroker(host string, settings BrokerSettings) *Broker { + cfg := sarama.NewConfig() + cfg.Net.DialTimeout = settings.DialTimeout + cfg.Net.ReadTimeout = settings.ReadTimeout + cfg.ClientID = settings.ClientID + cfg.Metadata.Retry.Max = settings.Retries + cfg.Metadata.Retry.Backoff = settings.Backoff + if tls := settings.TLS; tls != nil { + cfg.Net.TLS.Enable = true + cfg.Net.TLS.Config = tls + } + if user := settings.Username; user != "" { + cfg.Net.SASL.Enable = true + cfg.Net.SASL.User = user + cfg.Net.SASL.Password = settings.Password + } + cfg.Version = settings.Version.get() + + return &Broker{ + broker: sarama.NewBroker(host), + cfg: cfg, + id: noID, + matchID: settings.MatchID, + } +} + +// Close the broker connection +func (b *Broker) Close() error { + closeBroker(b.broker) + return nil +} + +// Connect connects the broker to the configured host +func (b *Broker) Connect() error { + if err := b.broker.Open(b.cfg); err != nil { + return err + } + + if b.id != noID || !b.matchID { + return nil + } + + // current broker is bootstrap only. 
Get metadata to find id: + meta, err := queryMetadataWithRetry(b.broker, b.cfg, nil) + if err != nil { + closeBroker(b.broker) + return err + } + + other := findMatchingBroker(brokerAddress(b.broker), meta.Brokers) + if other == nil { // no broker found + closeBroker(b.broker) + return fmt.Errorf("No advertised broker with address %v found", b.Addr()) + } + + debugf("found matching broker %v with id %v", other.Addr(), other.ID()) + b.id = other.ID() + b.advertisedAddr = other.Addr() + return nil +} + +// Addr returns the configured broker endpoint. +func (b *Broker) Addr() string { + return b.broker.Addr() +} + +// AdvertisedAddr returns the advertised broker address in case of +// matching broker has been found. +func (b *Broker) AdvertisedAddr() string { + return b.advertisedAddr +} + +// GetMetadata fetches most recent cluster metadata from the broker. +func (b *Broker) GetMetadata(topics ...string) (*sarama.MetadataResponse, error) { + return queryMetadataWithRetry(b.broker, b.cfg, topics) +} + +// GetTopicsMetadata fetches most recent topics/partition metadata from the broker. +func (b *Broker) GetTopicsMetadata(topics ...string) ([]*sarama.TopicMetadata, error) { + r, err := b.GetMetadata(topics...) + if err != nil { + return nil, err + } + return r.Topics, nil +} + +// PartitionOffset fetches the available offset from a partition. +func (b *Broker) PartitionOffset( + replicaID int32, + topic string, + partition int32, + time int64, +) (int64, error) { + req := &sarama.OffsetRequest{} + if replicaID != noID { + req.SetReplicaID(replicaID) + } + req.AddBlock(topic, partition, time, 1) + resp, err := b.broker.GetAvailableOffsets(req) + if err != nil { + return -1, err + } + + block := resp.GetBlock(topic, partition) + if len(block.Offsets) == 0 { + return -1, nil + } + + return block.Offsets[0], nil +} + +// ListGroups lists all groups managed by the broker. Other consumer +// groups might be managed by other brokers. +func (b *Broker) ListGroups() ([]string, error) { + resp, err := b.broker.ListGroups(&sarama.ListGroupsRequest{}) + if err != nil { + return nil, err + } + + if resp.Err != sarama.ErrNoError { + return nil, resp.Err + } + + if len(resp.Groups) == 0 { + return nil, nil + } + + groups := make([]string, 0, len(resp.Groups)) + for name := range resp.Groups { + groups = append(groups, name) + } + return groups, nil +} + +// DescribeGroups fetches group details from broker. 
+func (b *Broker) DescribeGroups( + queryGroups []string, +) (map[string]GroupDescription, error) { + requ := &sarama.DescribeGroupsRequest{Groups: queryGroups} + resp, err := b.broker.DescribeGroups(requ) + if err != nil { + return nil, err + } + + if len(resp.Groups) == 0 { + return nil, nil + } + + groups := map[string]GroupDescription{} + for _, descr := range resp.Groups { + if len(descr.Members) == 0 { + groups[descr.GroupId] = GroupDescription{} + continue + } + + members := map[string]MemberDescription{} + for memberID, memberDescr := range descr.Members { + assignment, err := memberDescr.GetMemberAssignment() + if err != nil { + members[memberID] = MemberDescription{ + ClientID: memberDescr.ClientId, + ClientHost: memberDescr.ClientHost, + Err: err, + } + continue + } + + members[memberID] = MemberDescription{ + ClientID: memberDescr.ClientId, + ClientHost: memberDescr.ClientHost, + Topics: assignment.Topics, + } + } + groups[descr.GroupId] = GroupDescription{Members: members} + } + + return groups, nil +} + +func (b *Broker) FetchGroupOffsets(group string) (*sarama.OffsetFetchResponse, error) { + requ := &sarama.OffsetFetchRequest{ + ConsumerGroup: group, + Version: 1, + } + return b.broker.FetchOffset(requ) +} + +// ID returns the broker or -1 if the broker id is unknown. +func (b *Broker) ID() int32 { + if b.id == noID { + return b.broker.ID() + } + return b.id +} + +func queryMetadataWithRetry( + b *sarama.Broker, + cfg *sarama.Config, + topics []string, +) (r *sarama.MetadataResponse, err error) { + err = withRetry(b, cfg, func() (e error) { + requ := &sarama.MetadataRequest{Topics: topics} + r, e = b.GetMetadata(requ) + return + }) + return +} + +func closeBroker(b *sarama.Broker) { + if ok, _ := b.Connected(); ok { + b.Close() + } +} + +func withRetry( + b *sarama.Broker, + cfg *sarama.Config, + f func() error, +) error { + var err error + for max := 0; max < cfg.Metadata.Retry.Max; max++ { + if ok, _ := b.Connected(); !ok { + if err = b.Open(cfg); err == nil { + err = f() + } + } else { + err = f() + } + + if err == nil { + return nil + } + + retry, reconnect := checkRetryQuery(err) + if !retry { + return err + } + + time.Sleep(cfg.Metadata.Retry.Backoff) + if reconnect { + closeBroker(b) + } + } + return err +} + +func checkRetryQuery(err error) (retry, reconnect bool) { + if err == nil { + return false, false + } + + if err == io.EOF { + return true, true + } + + k, ok := err.(sarama.KError) + if !ok { + return false, false + } + + switch k { + case sarama.ErrLeaderNotAvailable, sarama.ErrReplicaNotAvailable, + sarama.ErrOffsetsLoadInProgress, sarama.ErrRebalanceInProgress: + return true, false + case sarama.ErrRequestTimedOut, sarama.ErrBrokerNotAvailable, + sarama.ErrNetworkException: + return true, true + } + + return false, false +} + +func findMatchingBroker( + addr string, + brokers []*sarama.Broker, +) *sarama.Broker { + lst := brokerAddresses(brokers) + if idx, found := findMatchingAddress(addr, lst); found { + return brokers[idx] + } + return nil +} + +func findMatchingAddress( + addr string, + brokers []string, +) (int, bool) { + debugf("Try to match broker to: %v", addr) + + // compare connection address to list of broker addresses + if i, found := indexOf(addr, brokers); found { + return i, true + } + + // get connection 'port' + _, port, err := net.SplitHostPort(addr) + if err != nil || port == "" { + port = "9092" + } + + // lookup local machines ips for comparing with broker addresses + localIPs, err := common.LocalIPAddrs() + if err != nil || 
len(localIPs) == 0 { + return -1, false + } + debugf("local machine ips: %v", localIPs) + + // try to find broker by comparing the fqdn for each known ip to list of + // brokers + localHosts := lookupHosts(localIPs) + debugf("local machine addresses: %v", localHosts) + for _, host := range localHosts { + debugf("try to match with fqdn: %v (%v)", host, port) + if i, found := indexOf(net.JoinHostPort(host, port), brokers); found { + return i, true + } + } + + // try to find broker id by comparing the machines local hostname to + // broker hostnames in metadata + if host, err := os.Hostname(); err == nil { + debugf("try to match with hostname only: %v (%v)", host, port) + + tmp := net.JoinHostPort(strings.ToLower(host), port) + if i, found := indexOf(tmp, brokers); found { + return i, true + } + } + + // lookup ips for all brokers + debugf("match by ips") + for i, b := range brokers { + debugf("test broker address: %v", b) + bh, bp, err := net.SplitHostPort(b) + if err != nil { + continue + } + + // port numbers do not match + if bp != port { + continue + } + + // lookup all ips for brokers host: + ips, err := net.LookupIP(bh) + debugf("broker %v ips: %v, %v", bh, ips, err) + if err != nil { + continue + } + + debugf("broker (%v) ips: %v", bh, ips) + + // check if ip is known + if anyIPsMatch(ips, localIPs) { + return i, true + } + } + + return -1, false +} + +func lookupHosts(ips []net.IP) []string { + set := map[string]struct{}{} + for _, ip := range ips { + txt, err := ip.MarshalText() + if err != nil { + continue + } + + hosts, err := net.LookupAddr(string(txt)) + debugf("lookup %v => %v, %v", string(txt), hosts, err) + if err != nil { + continue + } + + for _, host := range hosts { + h := strings.ToLower(strings.TrimSuffix(host, ".")) + set[h] = struct{}{} + } + } + + hosts := make([]string, 0, len(set)) + for host := range set { + hosts = append(hosts, host) + } + return hosts +} + +func anyIPsMatch(as, bs []net.IP) bool { + for _, a := range as { + for _, b := range bs { + if bytes.Equal(a, b) { + return true + } + } + } + return false +} + +func brokerAddresses(brokers []*sarama.Broker) []string { + addresses := make([]string, len(brokers)) + for i, b := range brokers { + addresses[i] = brokerAddress(b) + } + return addresses +} + +func brokerAddress(b *sarama.Broker) string { + return strings.ToLower(b.Addr()) +} + +func indexOf(s string, lst []string) (int, bool) { + for i, v := range lst { + if s == v { + return i, true + } + } + return -1, false +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/_meta/data.json new file mode 100644 index 00000000..cf7dcd10 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/_meta/data.json @@ -0,0 +1,36 @@ +{ + "@timestamp":"2016-05-23T08:05:34.853Z", + "type":"metricsets", + "beat":{ + "hostname":"localhost", + "name":"localhost", + "version": "6.0.0-alpha1" + }, + "metricset":{ + "host":"localhost", + "module":"kafka", + "name":"consumergroup", + "rtt":269 + }, + "kafka":{ + "consumergroup":{ + "id": "group", + "topic": "test", + "partition": 0, + "client": { + "host": "127.0.0.1", + "id": "client0", + "member_id": "client0-d20b677a-5740-433e-a7f8-fbdab1f0f150" + }, + "broker": { + "address": "kafka0:9092", + "id": 0 + }, + "error": { + "code": 0 + }, + "meta": "", + "offset": 0 + } + } +} diff --git 
a/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/_meta/docs.asciidoc new file mode 100644 index 00000000..fcc5a6d3 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/_meta/docs.asciidoc @@ -0,0 +1,3 @@ +=== kafka consumergroup MetricSet + +This is the consumergroup metricset of the module kafka. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/_meta/fields.yml new file mode 100644 index 00000000..a69a7b8c --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/_meta/fields.yml @@ -0,0 +1,62 @@ +- name: consumergroup + type: group + description: > + consumergroup + fields: + - name: broker + type: group + description: > + Broker Consumer Group Information have been read from (Broker handling + the consumer group). + fields: + - name: id + type: long + description: > + Broker id + + - name: address + type: keyword + description: > + Broker address + + - name: id + type: keyword + description: Consumer Group ID + + - name: topic + type: keyword + description: Topic name + + - name: partition + type: long + description: Partition ID + + - name: offset + type: long + description: consumer offset into partition being read + + - name: meta + type: text + description: custom consumer meta data string + + - name: error.code + type: long + description: > + kafka consumer/partition error code. + + - name: client + type: group + description: > + Assigned client reading events from partition + fields: + - name: id + type: keyword + description: Client ID (kafka setting client.id) + + - name: host + type: keyword + description: Client host + + - name: member_id + type: keyword + description: internal consumer group member ID diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/config.go b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/config.go new file mode 100644 index 00000000..b06710d2 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/config.go @@ -0,0 +1,37 @@ +package consumergroup + +import ( + "fmt" + "time" + + "github.com/elastic/beats/libbeat/outputs" +) + +type metricsetConfig struct { + Retries int `config:"retries" validate:"min=0"` + Backoff time.Duration `config:"backoff" validate:"min=0"` + TLS *outputs.TLSConfig `config:"ssl"` + Username string `config:"username"` + Password string `config:"password"` + ClientID string `config:"client_id"` + + Groups []string `config:"groups"` + Topics []string `config:"topics"` +} + +var defaultConfig = metricsetConfig{ + Retries: 3, + Backoff: 250 * time.Millisecond, + TLS: nil, + Username: "", + Password: "", + ClientID: "metricbeat", +} + +func (c *metricsetConfig) Validate() error { + if c.Username != "" && c.Password == "" { + return fmt.Errorf("password must be set when username is configured") + } + + return nil +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/consumergroup.go b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/consumergroup.go new file mode 100644 index 00000000..3fcb81a8 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/consumergroup.go @@ -0,0 +1,101 @@ +package consumergroup + +import ( + "crypto/tls" + + 
"github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/libbeat/outputs" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/metricbeat/module/kafka" +) + +// init registers the MetricSet with the central registry. +func init() { + if err := mb.Registry.AddMetricSet("kafka", "consumergroup", New); err != nil { + panic(err) + } +} + +// MetricSet type defines all fields of the MetricSet +type MetricSet struct { + mb.BaseMetricSet + + broker *kafka.Broker + topics nameSet + groups nameSet +} + +type groupAssignment struct { + clientID string + memberID string + clientHost string +} + +var debugf = logp.MakeDebug("kafka") + +// New creates a new instance of the MetricSet. +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + logp.Warn("EXPERIMENTAL: The kafka consumergroup metricset is experimental") + + config := defaultConfig + if err := base.Module().UnpackConfig(&config); err != nil { + return nil, err + } + + var tls *tls.Config + tlsCfg, err := outputs.LoadTLSConfig(config.TLS) + if err != nil { + return nil, err + } + if tlsCfg != nil { + tls = tlsCfg.BuildModuleConfig("") + } + + timeout := base.Module().Config().Timeout + + cfg := kafka.BrokerSettings{ + MatchID: true, + DialTimeout: timeout, + ReadTimeout: timeout, + ClientID: config.ClientID, + Retries: config.Retries, + Backoff: config.Backoff, + TLS: tls, + Username: config.Username, + Password: config.Password, + + // consumer groups API requires at least 0.9.0.0 + Version: kafka.Version{"0.9.0.0"}, + } + + return &MetricSet{ + BaseMetricSet: base, + broker: kafka.NewBroker(base.Host(), cfg), + groups: makeNameSet(config.Groups...), + topics: makeNameSet(config.Topics...), + }, nil +} + +func (m *MetricSet) Fetch() ([]common.MapStr, error) { + if err := m.broker.Connect(); err != nil { + logp.Err("broker connect failed: %v", err) + return nil, err + } + + b := m.broker + defer b.Close() + + brokerInfo := common.MapStr{ + "id": b.ID(), + "address": b.AdvertisedAddr(), + } + + var events []common.MapStr + emitEvent := func(event common.MapStr) { + event["broker"] = brokerInfo + events = append(events, event) + } + err := fetchGroupInfo(emitEvent, b, m.groups.pred(), m.topics.pred()) + return events, err +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/mock_test.go b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/mock_test.go new file mode 100644 index 00000000..238af427 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/mock_test.go @@ -0,0 +1,130 @@ +package consumergroup + +import ( + "fmt" + "math/rand" + + "github.com/Shopify/sarama" + + "github.com/elastic/beats/metricbeat/module/kafka" +) + +type mockClient struct { + listGroups func() ([]string, error) + describeGroups func(group []string) (map[string]kafka.GroupDescription, error) + fetchGroupOffsets func(group string) (*sarama.OffsetFetchResponse, error) +} + +type mockState struct { + // group -> topics -> partitions -> offset + partitions map[string]map[string][]int64 // topics with group partition offsets + + // groups->client->topic->partitions ids + groups map[string][]map[string][]int32 // group/client assignments to topics and partition IDs +} + +func defaultMockClient(state mockState) *mockClient { + return &mockClient{ + listGroups: makeListGroups(state), + describeGroups: makeDescribeGroups(state), + fetchGroupOffsets: makeFetchGroupOffsets(state), + } +} + +func (c *mockClient) with(fn 
func(*mockClient)) *mockClient { + fn(c) + return c +} + +func makeListGroups(state mockState) func() ([]string, error) { + names := make([]string, 0, len(state.groups)) + for name := range state.groups { + names = append(names, name) + } + + return func() ([]string, error) { + return names, nil + } +} + +func makeDescribeGroups( + state mockState, +) func([]string) (map[string]kafka.GroupDescription, error) { + groups := map[string]kafka.GroupDescription{} + for name, st := range state.groups { + members := map[string]kafka.MemberDescription{} + for i, member := range st { + clientID := fmt.Sprintf("consumer-%v", i) + memberID := fmt.Sprintf("%v-%v", clientID, rand.Int()) + members[memberID] = kafka.MemberDescription{ + ClientID: clientID, + ClientHost: "/" + clientID, + Topics: member, + } + } + groups[name] = kafka.GroupDescription{Members: members} + } + + return func(group []string) (map[string]kafka.GroupDescription, error) { + ret := map[string]kafka.GroupDescription{} + for _, name := range group { + if g, found := groups[name]; found { + ret[name] = g + } + } + + if len(ret) == 0 { + ret = nil + } + return ret, nil + } +} + +func makeDescribeGroupsFail( + err error, +) func([]string) (map[string]kafka.GroupDescription, error) { + return func(_ []string) (map[string]kafka.GroupDescription, error) { + return nil, err + } +} + +func makeFetchGroupOffsets( + state mockState, +) func(group string) (*sarama.OffsetFetchResponse, error) { + return func(group string) (*sarama.OffsetFetchResponse, error) { + topics := state.partitions[group] + if topics == nil { + return &sarama.OffsetFetchResponse{}, nil + } + + blocks := map[string]map[int32]*sarama.OffsetFetchResponseBlock{} + for topic, partition := range topics { + T := map[int32]*sarama.OffsetFetchResponseBlock{} + blocks[topic] = T + + for i, offset := range partition { + T[int32(i)] = &sarama.OffsetFetchResponseBlock{ + Offset: int64(offset), + } + } + } + + return &sarama.OffsetFetchResponse{Blocks: blocks}, nil + } +} + +func makeFetchGroupOffsetsFail( + err error, +) func(string) (*sarama.OffsetFetchResponse, error) { + return func(_ string) (*sarama.OffsetFetchResponse, error) { + return nil, err + } +} + +func (c *mockClient) ListGroups() ([]string, error) { return c.listGroups() } +func (c *mockClient) DescribeGroups(groups []string) (map[string]kafka.GroupDescription, error) { + return c.describeGroups(groups) +} +func (c *mockClient) FetchGroupOffsets(group string) (*sarama.OffsetFetchResponse, error) { + return c.fetchGroupOffsets(group) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/nameset.go b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/nameset.go new file mode 100644 index 00000000..53435766 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/nameset.go @@ -0,0 +1,31 @@ +package consumergroup + +type nameSet map[string]struct{} + +func makeNameSet(strings ...string) nameSet { + if len(strings) == 0 { + return nil + } + + set := nameSet{} + for _, s := range strings { + set[s] = struct{}{} + } + return set +} + +func (s nameSet) has(name string) bool { + if s == nil { + return true + } + + _, ok := s[name] + return ok +} + +func (s nameSet) pred() func(string) bool { + if s == nil || len(s) == 0 { + return nil + } + return s.has +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/query.go b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/query.go new file mode 
100644 index 00000000..f13277f3 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/query.go @@ -0,0 +1,210 @@ +package consumergroup + +import ( + "sync" + + "github.com/Shopify/sarama" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/metricbeat/module/kafka" +) + +type client interface { + ListGroups() ([]string, error) + DescribeGroups(group []string) (map[string]kafka.GroupDescription, error) + FetchGroupOffsets(group string) (*sarama.OffsetFetchResponse, error) +} + +func fetchGroupInfo( + emit func(common.MapStr), + b client, + groupsFilter, topicsFilter func(string) bool, +) error { + type result struct { + err error + group string + off *sarama.OffsetFetchResponse + } + + groups, err := listGroups(b, groupsFilter) + if err != nil { + logp.Err("failed to list known kafka groups: %v", err) + return err + } + if len(groups) == 0 { + return nil + } + + debugf("known consumer groups: ", groups) + + wg := sync.WaitGroup{} + results := make(chan result, len(groups)) + for _, group := range groups { + group := group + + wg.Add(1) + go func() { + defer wg.Done() + resp, err := fetchGroupOffset(b, group, topicsFilter) + if err != nil { + logp.Err("failed to fetch '%v' group offset: %v", group, err) + } + results <- result{err, group, resp} + }() + } + + go func() { + wg.Wait() + close(results) + }() + + assignments, err := fetchGroupAssignments(b, groups) + if err != nil { + // wait for workers to stop and drop results + for range results { + } + + return err + } + + for ret := range results { + if err := ret.err; err != nil { + // wait for workers to stop and drop results + for range results { + } + return err + } + + asgnGroup := assignments[ret.group] + for topic, partitions := range ret.off.Blocks { + var asgnTopic map[int32]groupAssignment + if asgnGroup != nil { + asgnTopic = asgnGroup[topic] + } + + for partition, info := range partitions { + event := common.MapStr{ + "id": ret.group, + "topic": topic, + "partition": partition, + "offset": info.Offset, + "meta": info.Metadata, + "error": common.MapStr{ + "code": info.Err, + }, + } + + if asgnTopic != nil { + if assignment, found := asgnTopic[partition]; found { + event["client"] = common.MapStr{ + "id": assignment.clientID, + "host": assignment.clientHost, + "member_id": assignment.memberID, + } + } + } + + emit(event) + } + } + } + + return nil +} + +func listGroups(b client, filter func(string) bool) ([]string, error) { + groups, err := b.ListGroups() + if err != nil { + return nil, err + } + + if filter == nil { + return groups, nil + } + + filtered := groups[:0] + for _, name := range groups { + if filter(name) { + filtered = append(filtered, name) + } + } + return filtered, nil +} + +func fetchGroupAssignments( + b client, + groupIDs []string, +) (map[string]map[string]map[int32]groupAssignment, error) { + resp, err := b.DescribeGroups(groupIDs) + if err != nil { + return nil, err + } + + groups := map[string]map[string]map[int32]groupAssignment{} + +groupLoop: + for groupID, info := range resp { + G := groups[groupID] + if G == nil { + G = map[string]map[int32]groupAssignment{} + groups[groupID] = G + } + + for memberID, memberDescr := range info.Members { + if memberDescr.Err != nil { + // group doesn't seem to use standardized member assignment encoding + // => try next group + continue groupLoop + } + + clientID := memberDescr.ClientID + clientHost := memberDescr.ClientHost + if len(clientHost) > 1 && clientHost[0] == '/' { 
+ clientHost = clientHost[1:] + } + + meta := groupAssignment{ + memberID: memberID, + clientID: clientID, + clientHost: clientHost, + } + + for topic, partitions := range memberDescr.Topics { + T := G[topic] + if T == nil { + T = map[int32]groupAssignment{} + G[topic] = T + } + + for _, partition := range partitions { + T[partition] = meta + } + } + } + } + + return groups, nil +} + +func fetchGroupOffset( + b client, + group string, + topics func(string) bool, +) (*sarama.OffsetFetchResponse, error) { + resp, err := b.FetchGroupOffsets(group) + if err != nil { + return nil, err + } + + if topics == nil { + return resp, err + } + + for topic := range resp.Blocks { + if !topics(topic) { + delete(resp.Blocks, topic) + } + } + + return resp, nil +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/query_test.go b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/query_test.go new file mode 100644 index 00000000..ce65e39f --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/query_test.go @@ -0,0 +1,265 @@ +package consumergroup + +import ( + "fmt" + "io" + "reflect" + "testing" + + "github.com/elastic/beats/libbeat/common" + "github.com/stretchr/testify/assert" +) + +func TestFetchGroupInfo(t *testing.T) { + noEvents := func(events []common.MapStr) { + assert.Len(t, events, 0) + } + + tests := []struct { + name string + client client + groups []string + topics []string + err error + expected []common.MapStr + validate func([]common.MapStr) + }{ + { + name: "Test all groups", + client: defaultMockClient(mockState{ + partitions: map[string]map[string][]int64{ + "group1": { + "topic1": {10, 11, 12}, + "topic3": {6, 7}, + }, + "group2": { + "topic2": {3}, + "topic3": {9, 10}, + }, + }, + groups: map[string][]map[string][]int32{ + "group1": { + {"topic1": {0, 2}, "topic3": {1}}, + {"topic1": {1}, "topic3": {0}}, + }, + "group2": { + {"topic2": {0}, "topic3": {0, 1}}, + }, + }, + }), + expected: []common.MapStr{ + testEvent("group1", "topic1", 0, common.MapStr{ + "client": clientMeta(0), + "offset": int64(10), + }), + testEvent("group1", "topic1", 1, common.MapStr{ + "client": clientMeta(1), + "offset": int64(11), + }), + testEvent("group1", "topic1", 2, common.MapStr{ + "client": clientMeta(0), + "offset": int64(12), + }), + testEvent("group1", "topic3", 0, common.MapStr{ + "client": clientMeta(1), + "offset": int64(6), + }), + testEvent("group1", "topic3", 1, common.MapStr{ + "client": clientMeta(0), + "offset": int64(7), + }), + testEvent("group2", "topic2", 0, common.MapStr{ + "client": clientMeta(0), + "offset": int64(3), + }), + testEvent("group2", "topic3", 0, common.MapStr{ + "client": clientMeta(0), + "offset": int64(9), + }), + testEvent("group2", "topic3", 1, common.MapStr{ + "client": clientMeta(0), + "offset": int64(10), + }), + }, + }, + + { + name: "filter topics and groups", + client: defaultMockClient(mockState{ + partitions: map[string]map[string][]int64{ + "group1": { + "topic1": {1, 2}, + "topic2": {3, 4}, + }, + "group2": { + "topic2": {5, 6}, + "topic3": {7, 8}, + }, + }, + groups: map[string][]map[string][]int32{ + "group1": { + {"topic1": {0, 1}, "topic2": {0, 1}}, + }, + "group2": { + {"topic1": {0, 1}, "topic2": {0, 1}}, + }, + }, + }), + groups: []string{"group1"}, + topics: []string{"topic1"}, + expected: []common.MapStr{ + testEvent("group1", "topic1", 0, common.MapStr{ + "client": clientMeta(0), + "offset": int64(1), + }), + testEvent("group1", "topic1", 1, common.MapStr{ + 
"client": clientMeta(0), + "offset": int64(2), + }), + }, + }, + + { + name: "no events on empty group", + client: defaultMockClient(mockState{}), + validate: noEvents, + }, + + { + name: "fail to list groups", + client: defaultMockClient(mockState{}).with(func(c *mockClient) { + c.listGroups = func() ([]string, error) { + return nil, io.EOF + } + }), + err: io.EOF, + validate: noEvents, + }, + + { + name: "fail if assignment query fails", + client: defaultMockClient(mockState{ + partitions: map[string]map[string][]int64{ + "group1": {"topic1": {1}}, + }, + groups: map[string][]map[string][]int32{ + "group1": {{"topic1": {0}}}, + }, + }).with(func(c *mockClient) { + c.describeGroups = makeDescribeGroupsFail(io.EOF) + }), + err: io.EOF, + validate: noEvents, + }, + + { + name: "fail when fetching group offsets", + client: defaultMockClient(mockState{ + partitions: map[string]map[string][]int64{ + "group1": {"topic1": {1}}, + }, + groups: map[string][]map[string][]int32{ + "group1": {{"topic1": {0}}}, + }, + }).with(func(c *mockClient) { + c.fetchGroupOffsets = makeFetchGroupOffsetsFail(io.EOF) + }), + err: io.EOF, + validate: noEvents, + }, + } + + for i, test := range tests { + t.Logf("run test (%v): %v", i, test.name) + + var events []common.MapStr + collectEvents := func(event common.MapStr) { + t.Logf("new event: %v", event) + events = append(events, event) + } + + indexEvents := func(events []common.MapStr) map[string]common.MapStr { + index := map[string]common.MapStr{} + for _, e := range events { + key := fmt.Sprintf("%v::%v::%v", + e["id"], e["topic"], e["partition"], + ) + index[key] = e + } + return index + } + + groups := makeNameSet(test.groups...).pred() + topics := makeNameSet(test.topics...).pred() + err := fetchGroupInfo(collectEvents, test.client, groups, topics) + if err != nil { + switch { + case test.err == nil: + t.Fatal(err) + case test.err != err: + t.Error(err) + } + continue + } + + indexed := indexEvents(events) + for key, expected := range indexEvents(test.expected) { + event, found := indexed[key] + if !found { + t.Errorf("Missing event: %v", key) + continue + } + assertEvent(t, expected, event) + } + + if test.validate != nil { + test.validate(events) + } + } +} + +func assertEvent(t *testing.T, expected, event common.MapStr) { + for field, exp := range expected { + val, found := event[field] + if !found { + t.Errorf("Missing field: %v", field) + continue + } + + if sub, ok := exp.(common.MapStr); ok { + assertEvent(t, sub, val.(common.MapStr)) + } else { + if !assert.Equal(t, exp, val) { + t.Logf("failed in field: %v", field) + t.Logf("type expected: %v", reflect.TypeOf(exp)) + t.Logf("type event: %v", reflect.TypeOf(val)) + t.Logf("------------------------------") + } + } + } +} + +func testEvent( + group, topic string, + partition int, + fields ...common.MapStr, +) common.MapStr { + event := common.MapStr{ + "id": group, + "topic": topic, + "partition": int32(partition), + } + + for _, extra := range fields { + for k, v := range extra { + event[k] = v + } + } + return event +} + +func clientMeta(id int) common.MapStr { + return common.MapStr{ + "id": fmt.Sprintf("consumer-%v", id), + } +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/kafka.go b/vendor/github.com/elastic/beats/metricbeat/module/kafka/kafka.go new file mode 100644 index 00000000..d482bf1a --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/kafka.go @@ -0,0 +1,5 @@ +package kafka + +import "github.com/elastic/beats/libbeat/logp" + +var debugf = 
logp.MakeDebug("kafka") diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/config.go b/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/config.go index e36e8cb2..d5880775 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/config.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/config.go @@ -17,9 +17,6 @@ type connConfig struct { Topics []string `config:"topics"` } -type metaConfig struct { -} - var defaultConfig = connConfig{ Retries: 3, Backoff: 250 * time.Millisecond, diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition.go b/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition.go index 59c02e79..a29b3654 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition.go @@ -1,16 +1,15 @@ package partition import ( + "crypto/tls" "errors" - "fmt" - "io" - "time" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/outputs" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/mb/parse" + "github.com/elastic/beats/metricbeat/module/kafka" "github.com/Shopify/sarama" ) @@ -26,86 +25,57 @@ func init() { type MetricSet struct { mb.BaseMetricSet - broker *sarama.Broker - cfg *sarama.Config - id int32 + broker *kafka.Broker topics []string } -var noID int32 = -1 +const noID int32 = -1 var errFailQueryOffset = errors.New("operation failed") -// New create a new instance of the partition MetricSet +var debugf = logp.MakeDebug("kafka") + +// New creates a new instance of the partition MetricSet. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + logp.Warn("EXPERIMENTAL: The kafka partition metricset is experimental") + config := defaultConfig if err := base.Module().UnpackConfig(&config); err != nil { return nil, err } - tls, err := outputs.LoadTLSConfig(config.TLS) + var tls *tls.Config + tlsCfg, err := outputs.LoadTLSConfig(config.TLS) if err != nil { return nil, err } - - cfg := sarama.NewConfig() - cfg.Net.DialTimeout = base.Module().Config().Timeout - cfg.Net.ReadTimeout = base.Module().Config().Timeout - cfg.ClientID = config.ClientID - cfg.Metadata.Retry.Max = config.Retries - cfg.Metadata.Retry.Backoff = config.Backoff - if tls != nil { - cfg.Net.TLS.Enable = true - cfg.Net.TLS.Config = tls.BuildModuleConfig("") - } - if config.Username != "" { - cfg.Net.SASL.Enable = true - cfg.Net.SASL.User = config.Username - cfg.Net.SASL.Password = config.Password + if tlsCfg != nil { + tls = tlsCfg.BuildModuleConfig("") + } + + timeout := base.Module().Config().Timeout + cfg := kafka.BrokerSettings{ + MatchID: true, + DialTimeout: timeout, + ReadTimeout: timeout, + ClientID: config.ClientID, + Retries: config.Retries, + Backoff: config.Backoff, + TLS: tls, + Username: config.Username, + Password: config.Password, } - broker := sarama.NewBroker(base.Host()) return &MetricSet{ BaseMetricSet: base, - broker: broker, - cfg: cfg, - id: noID, + broker: kafka.NewBroker(base.Host(), cfg), topics: config.Topics, }, nil } -func (m *MetricSet) connect() (*sarama.Broker, error) { - b := m.broker - if err := b.Open(m.cfg); err != nil { - return nil, err - } - - if m.id != noID { - return b, nil - } - - // current broker is bootstrap only. 
Get metadata to find id: - meta, err := queryMetadataWithRetry(b, m.cfg, m.topics) - if err != nil { - closeBroker(b) - return nil, err - } - - addr := b.Addr() - for _, other := range meta.Brokers { - if other.Addr() == addr { - m.id = other.ID() - break - } - } - - if m.id == noID { - closeBroker(b) - err = fmt.Errorf("No advertised broker with address %v found", addr) - return nil, err - } - - return b, nil +func (m *MetricSet) connect() (*kafka.Broker, error) { + err := m.broker.Connect() + return m.broker, err } // Fetch partition stats list from kafka @@ -115,19 +85,20 @@ func (m *MetricSet) Fetch() ([]common.MapStr, error) { return nil, err } - defer closeBroker(b) - response, err := queryMetadataWithRetry(b, m.cfg, m.topics) + defer b.Close() + topics, err := b.GetTopicsMetadata(m.topics...) if err != nil { return nil, err } events := []common.MapStr{} evtBroker := common.MapStr{ - "id": m.id, + "id": b.ID(), "address": b.Addr(), } - for _, topic := range response.Topics { + for _, topic := range topics { + debugf("fetch events for topic: ", topic.Name) evtTopic := common.MapStr{ "name": topic.Name, } @@ -140,7 +111,8 @@ func (m *MetricSet) Fetch() ([]common.MapStr, error) { for _, partition := range topic.Partitions { // partition offsets can be queried from leader only - if m.id != partition.Leader { + if b.ID() != partition.Leader { + debugf("broker is not leader (broker=%v, leader=%v)", b.ID(), partition.Leader) continue } @@ -192,6 +164,29 @@ func (m *MetricSet) Fetch() ([]common.MapStr, error) { return events, nil } +// queryOffsetRange queries the broker for the oldest and the newest offsets in +// a kafka topics partition for a given replica. +func queryOffsetRange( + b *kafka.Broker, + replicaID int32, + topic string, + partition int32, +) (int64, int64, bool, error) { + oldest, err := b.PartitionOffset(replicaID, topic, partition, sarama.OffsetOldest) + if err != nil { + return -1, -1, false, err + } + + newest, err := b.PartitionOffset(replicaID, topic, partition, sarama.OffsetNewest) + if err != nil { + return -1, -1, false, err + } + + okOld := oldest != -1 + okNew := newest != -1 + return oldest, newest, okOld && okNew, nil +} + func hasID(id int32, lst []int32) bool { for _, other := range lst { if id == other { @@ -200,125 +195,3 @@ func hasID(id int32, lst []int32) bool { } return false } - -// queryOffsetRange queries the broker for the oldest and the newest offsets in -// a kafka topics partition for a given replica. 
-func queryOffsetRange( - b *sarama.Broker, - replicaID int32, - topic string, - partition int32, -) (int64, int64, bool, error) { - oldest, okOld, err := queryOffset(b, replicaID, topic, partition, sarama.OffsetOldest) - if err != nil { - return -1, -1, false, err - } - - newest, okNew, err := queryOffset(b, replicaID, topic, partition, sarama.OffsetNewest) - if err != nil { - return -1, -1, false, err - } - - return oldest, newest, okOld && okNew, nil -} - -func queryOffset( - b *sarama.Broker, - replicaID int32, - topic string, - partition int32, - time int64, -) (int64, bool, error) { - req := &sarama.OffsetRequest{} - if replicaID != noID { - req.SetReplicaID(replicaID) - } - req.AddBlock(topic, partition, time, 1) - resp, err := b.GetAvailableOffsets(req) - if err != nil { - return -1, false, err - } - - block := resp.GetBlock(topic, partition) - if len(block.Offsets) == 0 { - return -1, false, nil - } - - return block.Offsets[0], true, nil -} - -func closeBroker(b *sarama.Broker) { - if ok, _ := b.Connected(); ok { - b.Close() - } -} - -func queryMetadataWithRetry( - b *sarama.Broker, - cfg *sarama.Config, - topics []string, -) (r *sarama.MetadataResponse, err error) { - err = withRetry(b, cfg, func() (e error) { - r, e = b.GetMetadata(&sarama.MetadataRequest{topics}) - return - }) - return -} - -func withRetry( - b *sarama.Broker, - cfg *sarama.Config, - f func() error, -) error { - var err error - for max := 0; max < cfg.Metadata.Retry.Max; max++ { - if ok, _ := b.Connected(); !ok { - if err = b.Open(cfg); err == nil { - err = f() - } - } else { - err = f() - } - - if err == nil { - return nil - } - - retry, reconnect := checkRetryQuery(err) - if !retry { - return err - } - - time.Sleep(cfg.Metadata.Retry.Backoff) - if reconnect { - closeBroker(b) - } - } - return err -} - -func checkRetryQuery(err error) (retry, reconnect bool) { - if err == nil { - return false, false - } - - if err == io.EOF { - return true, true - } - - k, ok := err.(sarama.KError) - if !ok { - return false, false - } - - switch k { - case sarama.ErrLeaderNotAvailable, sarama.ErrReplicaNotAvailable, - sarama.ErrOffsetsLoadInProgress, sarama.ErrRebalanceInProgress: - return true, false - case sarama.ErrRequestTimedOut, sarama.ErrBrokerNotAvailable, - sarama.ErrNetworkException: - return true, true - } - - return false, false -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition_integration_test.go b/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition_integration_test.go index a76f75b2..687f9f3d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition_integration_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition_integration_test.go @@ -12,6 +12,7 @@ import ( "github.com/Shopify/sarama" "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" mbtest "github.com/elastic/beats/metricbeat/mb/testing" "github.com/stretchr/testify/assert" ) @@ -24,7 +25,7 @@ const ( func TestData(t *testing.T) { generateKafkaData(t, "metricbeat-generate-data") - f := mbtest.NewEventsFetcher(t, getConfig()) + f := mbtest.NewEventsFetcher(t, getConfig("")) err := mbtest.WriteEvents(f, t) if err != nil { t.Fatal("write", err) @@ -32,17 +33,25 @@ func TestData(t *testing.T) { } func TestTopic(t *testing.T) { + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"kafka"}) + } + id := 
strconv.Itoa(rand.New(rand.NewSource(int64(time.Now().Nanosecond()))).Int()) testTopic := fmt.Sprintf("test-metricbeat-%s", id) // Create initial topic generateKafkaData(t, testTopic) - f := mbtest.NewEventsFetcher(t, getConfig()) + f := mbtest.NewEventsFetcher(t, getConfig(testTopic)) dataBefore, err := f.Fetch() if err != nil { t.Fatal("write", err) } + if len(dataBefore) == 0 { + t.Errorf("No offsets fetched from topic (before): %v", testTopic) + } + t.Logf("before: %v", dataBefore) var n int64 = 10 var i int64 = 0 @@ -55,6 +64,10 @@ func TestTopic(t *testing.T) { if err != nil { t.Fatal("write", err) } + if len(dataAfter) == 0 { + t.Errorf("No offsets fetched from topic (after): %v", testTopic) + } + t.Logf("after: %v", dataAfter) // Checks that no new topics / partitions were added assert.True(t, len(dataBefore) == len(dataAfter)) @@ -84,7 +97,10 @@ } func generateKafkaData(t *testing.T, topic string) { + t.Logf("Send Kafka Event to topic: %v", topic) + config := sarama.NewConfig() + config.Producer.Return.Successes = true client, err := sarama.NewClient([]string{getTestKafkaHost()}, config) if err != nil { t.Errorf("%s", err) @@ -109,11 +125,17 @@ client.RefreshMetadata(topic) } -func getConfig() map[string]interface{} { +func getConfig(topic string) map[string]interface{} { + var topics []string + if topic != "" { + topics = []string{topic} + } + return map[string]interface{}{ "module": "kafka", "metricsets": []string{"partition"}, "hosts": []string{getTestKafkaHost()}, + "topics": topics, } } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/version.go b/vendor/github.com/elastic/beats/metricbeat/module/kafka/version.go new file mode 100644 index 00000000..16cc9444 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/version.go @@ -0,0 +1,62 @@ +package kafka + +import ( + "fmt" + + "github.com/Shopify/sarama" +) + +type Version struct { + String string +} + +var ( + minVersion = sarama.V0_8_2_0 + + kafkaVersions = map[string]sarama.KafkaVersion{ + "": sarama.V0_8_2_0, + + "0.8.2.0": sarama.V0_8_2_0, + "0.8.2.1": sarama.V0_8_2_1, + "0.8.2.2": sarama.V0_8_2_2, + "0.8.2": sarama.V0_8_2_2, + "0.8": sarama.V0_8_2_2, + + "0.9.0.0": sarama.V0_9_0_0, + "0.9.0.1": sarama.V0_9_0_1, + "0.9.0": sarama.V0_9_0_1, + "0.9": sarama.V0_9_0_1, + + "0.10.0.0": sarama.V0_10_0_0, + "0.10.0.1": sarama.V0_10_0_1, + "0.10.0": sarama.V0_10_0_1, + "0.10.1.0": sarama.V0_10_1_0, + "0.10.1": sarama.V0_10_1_0, + "0.10": sarama.V0_10_1_0, + } +) + +func (v *Version) Validate() error { + if _, ok := kafkaVersions[v.String]; !ok { + return fmt.Errorf("unknown/unsupported kafka version '%v'", v.String) + } + return nil +} + +func (v *Version) Unpack(s string) error { + tmp := Version{s} + if err := tmp.Validate(); err != nil { + return err + } + + *v = tmp + return nil +} + +func (v *Version) get() sarama.KafkaVersion { + if v, ok := kafkaVersions[v.String]; ok { + return v + } + + return minVersion +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mysql/status/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/mysql/status/_meta/data.json index 2bf5175a..c5b2ca25 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mysql/status/_meta/data.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/mysql/status/_meta/data.json @@ -26,6 +26,12 @@ "received": 992, "sent": 40657 }, + "command": { + "delete": 0, + "insert": 0, + "select": 9, + "update": 
0 + }, "connections": 11, "created": { "tmp": { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mysql/status/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/mysql/status/_meta/fields.yml index 57f23392..bc9c5535 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mysql/status/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/mysql/status/_meta/fields.yml @@ -135,3 +135,23 @@ - name: opened_tables type: long description: > + + - name: command + type: group + description: > + fields: + - name: delete + type: long + description: > + + - name: insert + type: long + description: > + + - name: select + type: long + description: > + + - name: update + type: long + description: > \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mysql/status/data.go b/vendor/github.com/elastic/beats/metricbeat/module/mysql/status/data.go index 274c2aa9..c3430ada 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mysql/status/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/mysql/status/data.go @@ -49,6 +49,12 @@ var ( "tables": c.Int("Open_tables"), }, "opened_tables": c.Int("Opened_tables"), + "command": s.Object{ + "delete": c.Int("Com_delete"), + "insert": c.Int("Com_insert"), + "select": c.Int("Com_select"), + "update": c.Int("Com_update"), + }, } ) diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/config.yml new file mode 100644 index 00000000..570b96e9 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/config.yml @@ -0,0 +1,7 @@ +#- module: prometheus + #metricsets: ["stats"] + #enabled: true + #period: 10s + #hosts: ["localhost:9090"] + #metrics_path: /metrics + #namespace: example diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/docs.asciidoc new file mode 100644 index 00000000..c641bcc1 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/docs.asciidoc @@ -0,0 +1,6 @@ +== Prometheus Module + +beta[] + +This module periodically fetches metrics from +https://prometheus.io/docs/[Prometheus]. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/fields.yml new file mode 100644 index 00000000..ebdf87ff --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/fields.yml @@ -0,0 +1,12 @@ +- key: prometheus + title: "Prometheus" + description: > + beta[] + + Stats collected from Prometheus. 
+ short_config: false + fields: + - name: prometheus + type: group + description: > + fields: diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/_meta/data.json new file mode 100644 index 00000000..76ac9fe5 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/_meta/data.json @@ -0,0 +1,23 @@ +{ + "@timestamp": "2016-05-23T08:05:34.853Z", + "beat": { + "hostname": "host.example.com", + "name": "host.example.com" + }, + "metricset": { + "host": "127.0.0.1:9090", + "module": "prometheus", + "name": "collector", + "namespace": "collector", + "rtt": 115 + }, + "prometheus": { + "collector": { + "label": { + "type": "quarantine_completed" + }, + "prometheus_local_storage_series_ops_total": 0 + } + }, + "type": "metricsets" +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/_meta/docs.asciidoc new file mode 100644 index 00000000..982025ba --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/_meta/docs.asciidoc @@ -0,0 +1,7 @@ +=== Prometheus Collector Metricset + +The Prometheus `collector` metricset fetches data from https://prometheus.io/docs/instrumenting/exporters/[prometheus exporters]. + +All events with the same labels are grouped together as one event. The fields +exported by this metricset vary depending on the Prometheus exporter that you're +using. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/_meta/fields.yml new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/collector.go b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/collector.go new file mode 100644 index 00000000..ac7cc8e3 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/collector.go @@ -0,0 +1,112 @@ +package collector + +import ( + "bufio" + "fmt" + "net/http" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/metricbeat/mb/parse" +) + +const ( + defaultScheme = "http" + defaultPath = "/metrics" +) + +var ( + debugf = logp.MakeDebug("prometheus-collector") + + hostParser = parse.URLHostParserBuilder{ + DefaultScheme: defaultScheme, + DefaultPath: defaultPath, + PathConfigKey: "metrics_path", + }.Build() +) + +func init() { + if err := mb.Registry.AddMetricSet("prometheus", "collector", New, hostParser); err != nil { + panic(err) + } +} + +type MetricSet struct { + mb.BaseMetricSet + client *http.Client + namespace string +} + +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + logp.Warn("EXPERIMENTAL: The prometheus collector metricset is experimental") + + config := struct { + Namespace string `config:"namespace" validate:"required"` + }{} + err := base.Module().UnpackConfig(&config) + if err != nil { + return nil, err + } + + return &MetricSet{ + BaseMetricSet: base, + client: &http.Client{Timeout: base.Module().Config().Timeout}, + namespace: config.Namespace, + }, nil +} + +func (m *MetricSet) Fetch() ([]common.MapStr, error) { + + req, err := http.NewRequest("GET", m.HostData().SanitizedURI, 
nil) + if m.HostData().User != "" || m.HostData().Password != "" { + req.SetBasicAuth(m.HostData().User, m.HostData().Password) + } + resp, err := m.client.Do(req) + if err != nil { + return nil, fmt.Errorf("error making http request: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("HTTP error %d: %s", resp.StatusCode, resp.Status) + } + + eventList := map[string]common.MapStr{} + scanner := bufio.NewScanner(resp.Body) + + // Iterate through all events to gather data + for scanner.Scan() { + line := scanner.Text() + // Skip comment lines + if line[0] == '#' { + continue + } + + promEvent := NewPromEvent(line) + if promEvent.value == nil { + continue + } + + // If MapString for this label group does not exist yet, it is created + if _, ok := eventList[promEvent.labelHash]; !ok { + eventList[promEvent.labelHash] = common.MapStr{} + + // Add labels + if len(promEvent.labels) > 0 { + eventList[promEvent.labelHash]["label"] = promEvent.labels + } + + } + eventList[promEvent.labelHash][promEvent.key] = promEvent.value + } + + // Converts hash list to slice + events := []common.MapStr{} + for _, e := range eventList { + e["_namespace"] = m.namespace + events = append(events, e) + } + + return events, err +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/collector_integration_test.go b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/collector_integration_test.go new file mode 100644 index 00000000..6d0bdbe2 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/collector_integration_test.go @@ -0,0 +1,61 @@ +// +build integration + +package collector + +import ( + "os" + "testing" + + mbtest "github.com/elastic/beats/metricbeat/mb/testing" + + "github.com/stretchr/testify/assert" +) + +// These tests are running with prometheus metrics as an example as this container is already available +// Every prometheus exporter should work here. 
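+// The exporter address used by these tests comes from the PROMETHEUS_HOST and
+// PROMETHEUS_PORT environment variables and defaults to 127.0.0.1:9090 (see getConfig below).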
+ +func TestFetch(t *testing.T) { + f := mbtest.NewEventsFetcher(t, getConfig()) + event, err := f.Fetch() + if !assert.NoError(t, err) { + t.FailNow() + } + + t.Logf("%s/%s event: %+v", f.Module().Name(), f.Name(), event) +} + +func TestData(t *testing.T) { + f := mbtest.NewEventsFetcher(t, getConfig()) + + err := mbtest.WriteEvents(f, t) + if err != nil { + t.Fatal("write", err) + } +} + +func getConfig() map[string]interface{} { + return map[string]interface{}{ + "module": "prometheus", + "metricsets": []string{"collector"}, + "hosts": []string{getPrometheusEnvHost() + ":" + getPrometheusEnvPort()}, + "namespace": "collector", + } +} + +func getPrometheusEnvHost() string { + host := os.Getenv("PROMETHEUS_HOST") + + if len(host) == 0 { + host = "127.0.0.1" + } + return host +} + +func getPrometheusEnvPort() string { + port := os.Getenv("PROMETHEUS_PORT") + + if len(port) == 0 { + port = "9090" + } + return port +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/collector_test.go b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/collector_test.go new file mode 100644 index 00000000..c6a1954d --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/collector_test.go @@ -0,0 +1,60 @@ +// +build !integration + +package collector + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/common" +) + +func TestDecodeLine(t *testing.T) { + tests := []struct { + Line string + Event PromEvent + }{ + { + Line: `http_request_duration_microseconds{handler="query",quantile="0.99"} 17`, + Event: PromEvent{ + key: "http_request_duration_microseconds", + value: int64(17), + labelHash: `handler="query",quantile="0.99"`, + labels: common.MapStr{ + "handler": "query", + "quantile": 0.99, + }, + }, + }, + { + Line: `http_request_duration_microseconds{handler="query",quantile="0.99"} NaN`, + Event: PromEvent{ + key: "http_request_duration_microseconds", + value: nil, + labelHash: `handler="query",quantile="0.99"`, + labels: common.MapStr{ + "handler": "query", + "quantile": 0.99, + }, + }, + }, + { + Line: `http_request_duration_microseconds{handler="query",quantile="0.99"} 13.2`, + Event: PromEvent{ + key: "http_request_duration_microseconds", + value: 13.2, + labelHash: `handler="query",quantile="0.99"`, + labels: common.MapStr{ + "handler": "query", + "quantile": 0.99, + }, + }, + }, + } + + for _, test := range tests { + event := NewPromEvent(test.Line) + assert.Equal(t, event, test.Event) + } +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/data.go b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/data.go new file mode 100644 index 00000000..0f39e1d0 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/data.go @@ -0,0 +1,82 @@ +package collector + +import ( + "strconv" + "strings" + + "github.com/elastic/beats/libbeat/common" +) + +type PromEvent struct { + key string + value interface{} + labels common.MapStr + labelHash string +} + +// NewPromEvent creates a prometheus event based on the given string +func NewPromEvent(line string) PromEvent { + // Separate key and value + split := strings.Split(line, " ") + + promEvent := PromEvent{ + key: split[0], + labelHash: "_", // _ represents empty labels + } + + // skip entries without a value + if split[1] == "NaN" { + promEvent.value = nil + } else { + promEvent.value = convertValue(split[1]) + } + + // Split key + 
startLabels := strings.Index(line, "{") + endLabels := strings.Index(line, "}") + + // Handle labels + if startLabels != -1 { + // Overwrite key, as key contained labels until now too + promEvent.key = line[0:startLabels] + promEvent.labelHash = line[startLabels+1 : endLabels] + // Extract labels + promEvent.labels = extractLabels(promEvent.labelHash) + } + + return promEvent +} + +// extractLabels splits up a label string of format handler="alerts",quantile="0.5" +// into a key / value list +func extractLabels(labelsString string) common.MapStr { + + keyValuePairs := common.MapStr{} + + // Extract labels + labels := strings.Split(labelsString, ",") + for _, label := range labels { + keyValue := strings.Split(label, "=") + // Remove " from value + keyValue[1] = keyValue[1][1 : len(keyValue[1])-1] + + // Converts value to int or float if needed + keyValuePairs[keyValue[0]] = convertValue(keyValue[1]) + } + + return keyValuePairs +} + +// convertValue takes the input string and converts it to int or float +func convertValue(value string) interface{} { + + if i, err := strconv.ParseInt(value, 10, 64); err == nil { + return i + } + + if f, err := strconv.ParseFloat(value, 64); err == nil { + return f + } + + return value +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/doc.go b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/doc.go new file mode 100644 index 00000000..1324b955 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/doc.go @@ -0,0 +1,4 @@ +/* +Package prometheus is a Metricbeat module that contains MetricSets. +*/ +package prometheus diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/_meta/data.json new file mode 100644 index 00000000..22c8d351 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/_meta/data.json @@ -0,0 +1,28 @@ +{ + "@timestamp": "2016-05-23T08:05:34.853Z", + "beat": { + "hostname": "host.example.com", + "name": "host.example.com" + }, + "metricset": { + "host": "127.0.0.1:9090", + "module": "prometheus", + "name": "stats", + "rtt": 115 + }, + "prometheus": { + "stats": { + "notifications": { + "dropped": 0, + "queue_length": 0 + }, + "processes": { + "open_fds": 24 + }, + "storage": { + "chunks_to_persist": 465 + } + } + }, + "type": "metricsets" +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/_meta/docs.asciidoc new file mode 100644 index 00000000..6da0ba14 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/_meta/docs.asciidoc @@ -0,0 +1,4 @@ +=== Prometheus Stats Metricset + +The Prometheus `stats` metricset collects statistics about the Prometheus +server. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/_meta/fields.yml new file mode 100644 index 00000000..ff4c7a06 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/_meta/fields.yml @@ -0,0 +1,26 @@ +- name: stats + type: group + description: > + Stats about the Prometheus server. + fields: + - name: notifications + type: group + description: > + Notification stats. + fields: + - name: queue_length + type: long + description: > + Current queue length. 
+ - name: dropped + type: long + description: > + Number of dropped queue events. + - name: processes.open_fds + type: long + description: > + Number of open file descriptors. + - name: storage.chunks_to_persist + type: long + description: > + Number of memory chunks that are not yet persisted to disk. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/data.go b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/data.go new file mode 100644 index 00000000..f1ea2a02 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/data.go @@ -0,0 +1,26 @@ +package stats + +import ( + "github.com/elastic/beats/libbeat/common" + s "github.com/elastic/beats/metricbeat/schema" + c "github.com/elastic/beats/metricbeat/schema/mapstrstr" +) + +var ( + schema = s.Schema{ + "notifications": s.Object{ + "queue_length": c.Int("prometheus_notifications_queue_length"), + "dropped": c.Int("prometheus_notifications_dropped_total"), + }, + "processes": s.Object{ + "open_fds": c.Int("process_open_fds"), + }, + "storage": s.Object{ + "chunks_to_persist": c.Int("prometheus_local_storage_chunks_to_persist"), + }, + } +) + +func eventMapping(entries map[string]interface{}) (common.MapStr, error) { + return schema.Apply(entries), nil +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/stats.go b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/stats.go new file mode 100644 index 00000000..1429f6c7 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/stats.go @@ -0,0 +1,84 @@ +package stats + +import ( + "bufio" + "fmt" + "net/http" + "strings" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/metricbeat/mb/parse" +) + +const ( + defaultScheme = "http" + defaultPath = "/metrics" +) + +var ( + debugf = logp.MakeDebug("prometheus-stats") + + hostParser = parse.URLHostParserBuilder{ + DefaultScheme: defaultScheme, + DefaultPath: defaultPath, + }.Build() +) + +func init() { + if err := mb.Registry.AddMetricSet("prometheus", "stats", New, hostParser); err != nil { + panic(err) + } +} + +type MetricSet struct { + mb.BaseMetricSet + client *http.Client +} + +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + logp.Warn("EXPERIMENTAL: The prometheus stats metricset is experimental") + + return &MetricSet{ + BaseMetricSet: base, + client: &http.Client{Timeout: base.Module().Config().Timeout}, + }, nil +} + +func (m *MetricSet) Fetch() (common.MapStr, error) { + + req, err := http.NewRequest("GET", m.HostData().SanitizedURI, nil) + if m.HostData().User != "" || m.HostData().Password != "" { + req.SetBasicAuth(m.HostData().User, m.HostData().Password) + } + resp, err := m.client.Do(req) + if err != nil { + return nil, fmt.Errorf("error making http request: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("HTTP error %d: %s", resp.StatusCode, resp.Status) + } + + scanner := bufio.NewScanner(resp.Body) + + entries := map[string]interface{}{} + + // Iterate through all events to gather data + for scanner.Scan() { + line := scanner.Text() + + // Skip comments and calculated lines + if line[0] == '#' || strings.Contains(line, "quantile=") { + continue + } + split := strings.Split(line, " ") + entries[split[0]] = split[1] + } + + data, err := eventMapping(entries) + + return data, err +} diff --git 
a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/stats_integration_test.go b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/stats_integration_test.go new file mode 100644 index 00000000..5c1f70a5 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/stats_integration_test.go @@ -0,0 +1,62 @@ +// +build integration + +package stats + +import ( + "os" + "testing" + + "github.com/elastic/beats/libbeat/common" + mbtest "github.com/elastic/beats/metricbeat/mb/testing" + + "github.com/stretchr/testify/assert" +) + +func TestFetch(t *testing.T) { + f := mbtest.NewEventFetcher(t, getConfig()) + event, err := f.Fetch() + if !assert.NoError(t, err) { + t.FailNow() + } + + t.Logf("%s/%s event: %+v", f.Module().Name(), f.Name(), event) + + // Check number of fields. + assert.Equal(t, 3, len(event)) + assert.True(t, event["processes"].(common.MapStr)["open_fds"].(int64) > 0) +} + +func TestData(t *testing.T) { + f := mbtest.NewEventFetcher(t, getConfig()) + + err := mbtest.WriteEvent(f, t) + if err != nil { + t.Fatal("write", err) + } +} + +func getConfig() map[string]interface{} { + return map[string]interface{}{ + "module": "prometheus", + "metricsets": []string{"stats"}, + "hosts": []string{getPrometheusEnvHost() + ":" + getPrometheusEnvPort()}, + } +} + +func getPrometheusEnvHost() string { + host := os.Getenv("PROMETHEUS_HOST") + + if len(host) == 0 { + host = "127.0.0.1" + } + return host +} + +func getPrometheusEnvPort() string { + port := os.Getenv("PROMETHEUS_PORT") + + if len(port) == 0 { + port = "9090" + } + return port +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/redis/_meta/config.full.yml b/vendor/github.com/elastic/beats/metricbeat/module/redis/_meta/config.full.yml index e6572422..b221db7e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/redis/_meta/config.full.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/redis/_meta/config.full.yml @@ -6,9 +6,6 @@ # Redis hosts #hosts: ["127.0.0.1:6379"] - # Enabled defines if the module is enabled. Default: true - #enabled: true - # Timeout after which time a metricset should return an error # Timeout is by default defined as period, as a fetch of a metricset # should never take longer then period, as otherwise calls can pile up. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/config.full.yml b/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/config.full.yml index dd4050d5..a244aa15 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/config.full.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/config.full.yml @@ -26,6 +26,9 @@ # Per process stats - process + + # Sockets and connection info (linux only) + #- socket enabled: true period: 10s processes: ['.*'] @@ -35,3 +38,8 @@ # EXPERIMENTAL: cgroups can be enabled for the process metricset. #cgroups: false + + # Configure reverse DNS lookup on remote IP addresses in the socket metricset. 
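+ # Lookups are disabled by default; results are cached and expire after success_ttl / failure_ttl.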
+ #socket.reverse_lookup.enabled: false + #socket.reverse_lookup.success_ttl: 60s + #socket.reverse_lookup.failure_ttl: 60s diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/config.yml index a4548f82..474776c1 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/config.yml @@ -26,6 +26,9 @@ # Per process stats - process + + # Sockets (linux only) + #- socket enabled: true period: 10s processes: ['.*'] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/docs.asciidoc index 037adcda..bcf32a6c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/docs.asciidoc @@ -43,6 +43,9 @@ metricbeat.modules: cpu_ticks: true ---- +It is strongly recommended not to run the docker metricsets with a period shorter than 3 seconds. The request to the docker +API already takes up to 2 seconds. With a shorter period, all the requests would time out and no data would be reported. + [float] === Dashboard diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/cpu/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/system/cpu/_meta/fields.yml index 054ab19b..abb3e03e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/cpu/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/cpu/_meta/fields.yml @@ -3,6 +3,11 @@ description: > `cpu` contains local CPU stats. fields: + - name: cores + type: long + description: > + The number of CPU cores. 
+ - name: user.pct type: scaled_float format: percent diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/cpu/cpu.go b/vendor/github.com/elastic/beats/metricbeat/module/system/cpu/cpu.go index 62ed8664..a07a261a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/cpu/cpu.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/cpu/cpu.go @@ -52,7 +52,10 @@ func (m *MetricSet) Fetch() (common.MapStr, error) { } m.cpu.AddCpuPercentage(stat) + cpuCores := GetCores() + cpuStat := common.MapStr{ + "cores": cpuCores, "user": common.MapStr{ "pct": stat.UserPercent, }, diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/cpu/helper.go b/vendor/github.com/elastic/beats/metricbeat/module/system/cpu/helper.go index f97776c0..391fed94 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/cpu/helper.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/cpu/helper.go @@ -3,6 +3,8 @@ package cpu import ( + "runtime" + "github.com/elastic/beats/metricbeat/module/system" sigar "github.com/elastic/gosigar" ) @@ -12,6 +14,7 @@ type CPU struct { LastCpuTimes *CpuTimes LastCpuTimesList []CpuTimes CpuTicks bool + Cores int } type CpuTimes struct { @@ -115,6 +118,11 @@ func GetCpuPercentageList(last, current []CpuTimes) []CpuTimes { return current } +func GetCores() int { + cores := runtime.NumCPU() + return cores +} + func (cpu *CPU) AddCpuPercentage(t2 *CpuTimes) { cpu.LastCpuTimes = GetCpuPercentage(cpu.LastCpuTimes, t2) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/load/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/system/load/_meta/docs.asciidoc index d043fd5f..5dbebca6 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/load/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/load/_meta/docs.asciidoc @@ -1,4 +1,4 @@ -=== system load MetricSet +=== System Load Metricset The System `load` metricset provides load statistics. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/process/helper.go b/vendor/github.com/elastic/beats/metricbeat/module/system/process/helper.go index c292a9b9..96d11914 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/process/helper.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/process/helper.go @@ -34,11 +34,10 @@ type Process struct { } type ProcStats struct { - ProcStats bool - Procs []string - regexps []*regexp.Regexp - ProcsMap ProcsMap - CpuTicks bool + Procs []string + regexps []*regexp.Regexp + ProcsMap ProcsMap + CpuTicks bool } // newProcess creates a new Process object based on the state information. 
@@ -124,7 +123,6 @@ func getProcFDUsage(pid int) (*sigar.ProcFDUsage, error) { func GetProcMemPercentage(proc *Process, totalPhyMem uint64) float64 { // in unit tests, total_phymem is set to a value greater than zero - if totalPhyMem == 0 { memStat, err := memory.GetMemory() if err != nil { @@ -260,25 +258,6 @@ func (procStats *ProcStats) InitProcStats() error { procStats.regexps = append(procStats.regexps, reg) } - pids, err := Pids() - if err != nil { - logp.Warn("Getting the initial list of pids: %v", err) - } - - for _, pid := range pids { - process, err := newProcess(pid) - if err != nil { - logp.Debug("metricbeat", "Skip process pid=%d: %v", pid, err) - continue - } - err = process.getDetails("") - if err != nil { - logp.Err("Error getting process details pid=%d: %v", pid, err) - continue - } - procStats.ProcsMap[process.Pid] = process - } - return nil } @@ -329,26 +308,6 @@ func (procStats *ProcStats) GetProcStats() ([]common.MapStr, error) { return processes, nil } -func (procStats *ProcStats) GetProcStatsEvents() ([]common.MapStr, error) { - processes, err := procStats.GetProcStats() - if err != nil { - return nil, err - } - - events := make([]common.MapStr, len(processes)) - for _, proc := range processes { - event := common.MapStr{ - "@timestamp": common.Time(time.Now()), - "type": "process", - "proc": proc, - } - - events = append(events, event) - } - - return events, nil -} - // unixTimeMsToTime converts a unix time given in milliseconds since Unix epoch // to a common.Time value. func unixTimeMsToTime(unixTimeMs uint64) common.Time { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/process/process.go b/vendor/github.com/elastic/beats/metricbeat/module/system/process/process.go index 517265c6..fbcf95a0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/process/process.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/process/process.go @@ -48,8 +48,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { m := &MetricSet{ BaseMetricSet: base, stats: &ProcStats{ - ProcStats: true, - Procs: config.Procs, + Procs: config.Procs, }, } err := m.stats.InitProcStats() diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/socket/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/_meta/data.json new file mode 100644 index 00000000..c17ffaff --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/_meta/data.json @@ -0,0 +1,33 @@ +{ + "@timestamp": "2016-05-23T08:05:34.853Z", + "beat": { + "hostname": "host.example.com", + "name": "host.example.com" + }, + "metricset": { + "module": "system", + "name": "socket", + "rtt": 115 + }, + "system": { + "socket": { + "direction": "listening", + "family": "ipv6", + "local": { + "ip": "::", + "port": 42061 + }, + "process": { + "cmdline": "/tmp/go-build519327234/github.com/elastic/beats/metricbeat/module/system/socket/_test/socket.test -test.v=true -data", + "command": "socket.test", + "exe": "/tmp/go-build519327234/github.com/elastic/beats/metricbeat/module/system/socket/_test/socket.test", + "pid": 2025 + }, + "user": { + "id": 0, + "name": "root" + } + } + }, + "type": "metricsets" +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/socket/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/_meta/docs.asciidoc new file mode 100644 index 00000000..86868b3f --- /dev/null +++ 
b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/_meta/docs.asciidoc @@ -0,0 +1,45 @@ +=== System Socket Metricset + +beta[] + +This metricset is available on Linux only and requires kernel 2.6.14 or newer. + +The system `socket` metricset reports an event for each new TCP socket that it +sees. It does this by polling the kernel periodically to get a dump of all +sockets. You set the polling interval by configuring the `period` option. +Specifying a short polling interval with this metricset is important to avoid +missing short-lived connections. For example: + +[source,yaml] +--- +metricbeat.modules: +- module: system + metricsets: [cpu, memory] +- module: system + metricsets: [socket] <1> + period: 1s +--- + +<1> You can configure the `socket` metricset separately to specify a different +`period` value than the other metricsets. + +The metricset reports the process that has the socket open. In order to provide +this information, Metricbeat must be running as root. Root access is also +required to read the file descriptor information of other processes. + +You can configure the metricset to perform a reverse lookup on the remote IP, +and the returned hostname will be added to the event and cached. If a hostname +is found, then the eTLD+1 (effective top-level domain plus one level) value will +also be added to the event. Reverse lookups are disabled by default. + +The following example shows the full configuration for the metricset along with +the defaults. + +[source,yaml] +--- +- module: system + metricsets: [socket] + socket.reverse_lookup.enabled: false + socket.reverse_lookup.success_ttl: 60s + socket.reverse_lookup.failure_ttl: 60s +--- diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/socket/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/_meta/fields.yml new file mode 100644 index 00000000..d3c5a476 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/_meta/fields.yml @@ -0,0 +1,91 @@ +- name: socket + type: group + description: > + TCP sockets that are active. + fields: + - name: direction + type: keyword + example: incoming + description: > + How the socket was initiated. Possible values are incoming, outgoing, + or listening. + + - name: family + type: keyword + example: ipv4 + description: > + Address family. + + - name: local.ip + type: ip + example: 192.0.2.1 or 2001:0DB8:ABED:8536::1 + description: > + Local IP address. This can be an IPv4 or IPv6 address. + + - name: local.port + type: long + example: 22 + description: > + Local port. + + - name: remote.ip + type: ip + example: 192.0.2.1 or 2001:0DB8:ABED:8536::1 + description: > + Remote IP address. This can be an IPv4 or IPv6 address. + + - name: remote.port + type: long + example: 22 + description: > + Remote port. + + - name: remote.host + type: keyword + example: 76-211-117-36.nw.example.com. + description: > + PTR record associated with the remote IP. It is obtained via reverse + IP lookup. + + - name: remote.etld_plus_one + type: keyword + example: example.com. + description: > + The effective top-level domain (eTLD) of the remote host plus one more + label. For example, the eTLD+1 for "foo.bar.golang.org." is "golang.org.". + The data for determining the eTLD comes from an embedded copy of the data + from http://publicsuffix.org. + + - name: remote.host_error + type: keyword + description: > + Error describing the cause of the reverse lookup failure. 
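Editorial sketch (not part of the patch) of how the `remote.host`, `remote.etld_plus_one`, and `remote.host_error` fields described above are derived: an optional reverse (PTR) lookup on the remote IP, followed by an eTLD+1 computation with the same `golang.org/x/net/publicsuffix` package the metricset itself uses in `reverse_lookup.go` later in this diff.

[source,go]
----
package main

import (
	"fmt"
	"net"
	"strings"

	"golang.org/x/net/publicsuffix"
)

func main() {
	// PTR lookup for the remote IP; this needs a resolver, so it may
	// legitimately fail when run offline. On failure the metricset would
	// populate remote.host_error instead of remote.host.
	names, err := net.LookupAddr("192.0.2.1")
	if err != nil || len(names) == 0 {
		fmt.Println("lookup failed: remote.host_error would carry the error")
		return
	}

	host := names[0] // e.g. "76-211-117-36.nw.example.com."
	fmt.Println("remote.host =", host)

	// eTLD+1 of the hostname; the trailing dot is stripped for the lookup
	// and re-appended if it was present, mirroring reverse_lookup.go.
	trimmedHost := strings.TrimSuffix(host, ".")
	domain, err := publicsuffix.EffectiveTLDPlusOne(trimmedHost)
	if err == nil {
		if trimmedHost != host {
			domain += "."
		}
		fmt.Println("remote.etld_plus_one =", domain)
	}
}
----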
+ + - name: process.pid + type: long + description: > + ID of the process that opened the socket. + + - name: process.command + type: keyword + description: > + Name of the command (limited to 20 chars by the OS). + + - name: process.cmdline + type: keyword + description: > + + - name: process.exe + type: keyword + description: > + Absolute path to the executable. + + - name: user.id + type: long + description: > + UID of the user running the process. + + - name: user.name + type: keyword + description: > + Name of the user running the process. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/socket/config.go b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/config.go new file mode 100644 index 00000000..fbace279 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/config.go @@ -0,0 +1,31 @@ +package socket + +import "time" + +// Config is the configuration specific to the socket MetricSet. +type Config struct { + ReverseLookup *ReverseLookupConfig `config:"socket.reverse_lookup"` +} + +// ReverseLookupConfig contains the configuration that controls the reverse +// DNS lookup behavior. +type ReverseLookupConfig struct { + Enabled *bool `config:"enabled"` + SuccessTTL time.Duration `config:"success_ttl"` + FailureTTL time.Duration `config:"failure_ttl"` +} + +// IsEnabled returns true if reverse_lookup is defined and 'enabled' is either +// not set or set to true. +func (c *ReverseLookupConfig) IsEnabled() bool { + return c != nil && (c.Enabled == nil || *c.Enabled) +} + +const ( + defSuccessTTL = 60 * time.Second + defFailureTTL = 60 * time.Second +) + +var defaultConfig = Config{ + ReverseLookup: nil, +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/socket/doc.go b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/doc.go new file mode 100644 index 00000000..84a2bef2 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/doc.go @@ -0,0 +1,4 @@ +/* +Package socket captures active sockets and the processes that own them. +*/ +package socket diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/socket/hashset.go b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/hashset.go new file mode 100644 index 00000000..221ea8c7 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/hashset.go @@ -0,0 +1,23 @@ +package socket + +// hashSet is a simple set built upon a map. +type hashSet map[uint64]struct{} + +// Add adds a value to the set. +func (s hashSet) Add(hash uint64) { + s[hash] = struct{}{} +} + +// Contains return true if the value is in the set. +func (s hashSet) Contains(hash uint64) bool { + _, exists := s[hash] + return exists +} + +// Reset resets the contents of the set to empty and returns itself. 
+func (s hashSet) Reset() hashSet { + for k := range s { + delete(s, k) + } + return s +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/socket/hashset_test.go b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/hashset_test.go new file mode 100644 index 00000000..cd0d0d10 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/hashset_test.go @@ -0,0 +1,18 @@ +package socket + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestHashSet(t *testing.T) { + set := hashSet{} + + set.Add(10) + assert.True(t, set.Contains(10)) + assert.False(t, set.Contains(0)) + + set.Reset() + assert.Len(t, set, 0) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/socket/listeners.go b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/listeners.go new file mode 100644 index 00000000..8c2306c8 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/listeners.go @@ -0,0 +1,127 @@ +package socket + +import ( + "net" +) + +// Direction indicates how a socket was initiated. +type Direction uint8 + +const ( + _ Direction = iota + // Incoming indicates a connection was established from the outside to + // listening socket on this host. + Incoming + // Outgoing indicates a connection was established from this socket to an + // external listening socket. + Outgoing + // Listening indicates a socket that is listening. + Listening +) + +var directionNames = map[Direction]string{ + Incoming: "incoming", + Outgoing: "outgoing", + Listening: "listening", +} + +func (d Direction) String() string { + if name, exists := directionNames[d]; exists { + return name + } + return "unknown" +} + +// ipList is a list of IP addresses. +type ipList struct { + ips []net.IP +} + +func (l *ipList) put(ip net.IP) { l.ips = append(l.ips, ip) } + +// portTable is a mapping of port number to listening IP addresses. +type portTable map[int]*ipList + +// protocolTable is a mapping of protocol numbers to listening ports. +type protocolTable map[uint8]portTable + +// ListenerTable tracks sockets that are listening. It can then be used to +// identify if a socket is listening, incoming, or outgoing. +type ListenerTable struct { + data protocolTable +} + +// NewListenerTable returns a new ListenerTable. +func NewListenerTable() *ListenerTable { + return &ListenerTable{ + data: protocolTable{}, + } +} + +// Reset resets all data in the table. +func (t *ListenerTable) Reset() { + for _, ports := range t.data { + for port := range ports { + delete(ports, port) + } + } +} + +// Put puts a new listening address into the table. +func (t *ListenerTable) Put(proto uint8, ip net.IP, port int) { + ports, exists := t.data[proto] + if !exists { + ports = portTable{} + t.data[proto] = ports + } + + // Add port + addr to table. + interfaces, exists := ports[port] + if !exists { + interfaces = &ipList{} + ports[port] = interfaces + } + interfaces.put(ip) +} + +// Direction returns whether the connection was incoming or outgoing based on +// the protocol and local address. It compares the given local address to the +// listeners in the table for the protocol and returns Incoming if there is a +// match. If remotePort is 0 then Listening is returned. +func (t *ListenerTable) Direction( + proto uint8, + localIP net.IP, localPort int, + remoteIP net.IP, remotePort int, +) Direction { + if remotePort == 0 { + return Listening + } + + // Are there any listeners on the given protocol? 
+ ports, exists := t.data[proto] + if !exists { + return Outgoing + } + + // Is there any listener on the port? + interfaces, exists := ports[localPort] + if !exists { + return Outgoing + } + + // Is there a listener that specific interface? OR + // Is there a listener on the "any" address (0.0.0.0 or ::)? + isIPv4 := localIP.To4() != nil + for _, ip := range interfaces.ips { + switch { + case ip.Equal(localIP): + return Incoming + case ip.Equal(net.IPv4zero) && isIPv4: + return Incoming + case ip.Equal(net.IPv6zero) && !isIPv4: + return Incoming + } + } + + return Outgoing +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/socket/listeners_test.go b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/listeners_test.go new file mode 100644 index 00000000..e0dd0faf --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/listeners_test.go @@ -0,0 +1,43 @@ +package socket + +import ( + "net" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestListenerTable(t *testing.T) { + l := NewListenerTable() + + proto := uint8(4) + lAddr := net.ParseIP("192.0.2.1") + httpPort := 80 + rAddr := net.ParseIP("198.18.0.1") + ephemeralPort := 48199 + ipv6Addr := net.ParseIP("2001:db8:fe80::217:f2ff:fe07:ed62") + + // Any socket with remote port of 0 is listening. + assert.Equal(t, Listening, l.Direction(proto, lAddr, httpPort, net.IPv4zero, 0)) + + // Listener on 192.0.2.1:80 + l.Put(proto, lAddr, httpPort) + + assert.Equal(t, Incoming, l.Direction(proto, lAddr, httpPort, rAddr, ephemeralPort)) + assert.Equal(t, Outgoing, l.Direction(0, lAddr, httpPort, rAddr, ephemeralPort)) + assert.Equal(t, Outgoing, l.Direction(proto, lAddr, ephemeralPort, rAddr, ephemeralPort)) + + // Listener on 0.0.0.0:80 + l.Reset() + l.Put(proto, net.IPv4zero, httpPort) + + assert.Equal(t, Incoming, l.Direction(proto, lAddr, httpPort, rAddr, ephemeralPort)) + assert.Equal(t, Outgoing, l.Direction(proto, ipv6Addr, httpPort, rAddr, ephemeralPort)) + + // Listener on :::80 + l.Reset() + l.Put(proto, net.IPv6zero, httpPort) + + assert.Equal(t, Incoming, l.Direction(proto, ipv6Addr, httpPort, rAddr, ephemeralPort)) + assert.Equal(t, Outgoing, l.Direction(proto, lAddr, httpPort, rAddr, ephemeralPort)) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/socket/ptable.go b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/ptable.go new file mode 100644 index 00000000..1e77ab70 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/ptable.go @@ -0,0 +1,133 @@ +package socket + +import ( + "os" + "strconv" + "strings" + + "github.com/elastic/procfs" + "github.com/joeshaw/multierror" +) + +// process tools + +// Proc contains static process information. +type Proc struct { + PID int + Command string + Executable string + CmdLine string +} + +// ProcTable contains all of the active processes (if the current user is root). +type ProcTable struct { + fs procfs.FS + procs map[int]*Proc + inodes map[uint32]*Proc + euid int +} + +// NewProcTable returns a new ProcTable that reads data from the /proc +// directory by default. An alternative proc filesystem mountpoint can be +// specified through the mountpoint parameter. 
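Editorial usage sketch (not part of the patch) for the `ProcTable` introduced here: it is what lets the socket metricset attribute a socket to its owning process by inode, and it only yields data for other processes when running as root on a procfs system. Identifiers and the import path are taken from this file; the inode value is a placeholder.

[source,go]
----
package main

import (
	"fmt"
	"log"

	"github.com/elastic/beats/metricbeat/module/system/socket"
)

func main() {
	pt, err := socket.NewProcTable("") // "" selects the default /proc mountpoint
	if err != nil {
		log.Fatal(err)
	}
	if err := pt.Refresh(); err != nil {
		// Refresh returns a multierror; partial data may still be usable.
		log.Printf("proc table refresh: %v", err)
	}
	// Resolve a socket inode (as listed in /proc/<pid>/fd) to its process.
	if proc := pt.ProcessBySocketInode(12345); proc != nil {
		fmt.Printf("pid=%d command=%s cmdline=%q\n", proc.PID, proc.Command, proc.CmdLine)
	}
}
----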
+func NewProcTable(mountpoint string) (*ProcTable, error) { + if mountpoint == "" { + mountpoint = procfs.DefaultMountPoint + } + + fs, err := procfs.NewFS(mountpoint) + if err != nil { + return nil, err + } + + p := &ProcTable{fs: fs, euid: os.Geteuid()} + p.Refresh() + return p, nil +} + +// Refresh updates the process table with new processes and removes processes +// that have exited. It collects the PID, command, and socket inode information. +// If running as non-root, only information from the current process will be +// collected. +func (t *ProcTable) Refresh() error { + var err error + var procs []procfs.Proc + if t.euid == 0 { + procs, err = t.fs.AllProcs() + if err != nil { + return err + } + } else { + proc, err := t.fs.Self() + if err != nil { + return err + } + procs = append(procs, proc) + } + + var errs multierror.Errors + inodes := map[uint32]*Proc{} + cachedProcs := make(map[int]*Proc, len(procs)) + for _, p := range procs { + proc := t.procs[p.PID] + + // Cache miss. + if proc == nil { + proc = &Proc{PID: p.PID} + + if proc.Executable, err = p.Executable(); err != nil { + errs = append(errs, err) + } + if proc.Command, err = p.Comm(); err != nil { + errs = append(errs, err) + } + if cmdline, err := p.CmdLine(); err != nil { + errs = append(errs, err) + } else { + proc.CmdLine = strings.Join(cmdline, " ") + } + } + cachedProcs[proc.PID] = proc + + // Always update map socket inode to Proc. + socketInodes, err := socketInodes(&p) + if err != nil { + errs = append(errs, err) + continue + } + for _, inode := range socketInodes { + inodes[inode] = proc + } + } + + t.procs = cachedProcs + t.inodes = inodes + return errs.Err() +} + +func socketInodes(p *procfs.Proc) ([]uint32, error) { + fds, err := p.FileDescriptorTargets() + if err != nil { + return nil, err + } + + var inodes []uint32 + for _, fd := range fds { + if strings.HasPrefix(fd, "socket:[") { + inode, err := strconv.ParseInt(fd[8:len(fd)-1], 10, 64) + if err != nil { + continue + } + + inodes = append(inodes, uint32(inode)) + } + } + + return inodes, nil +} + +// ProcessBySocketInode returns the Proc associated with the given socket +// inode. +func (t *ProcTable) ProcessBySocketInode(inode uint32) *Proc { + return t.inodes[inode] +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/socket/reverse_lookup.go b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/reverse_lookup.go new file mode 100644 index 00000000..4b847eef --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/reverse_lookup.go @@ -0,0 +1,112 @@ +package socket + +import ( + "fmt" + "net" + "time" + + "golang.org/x/net/publicsuffix" +) + +type ptrRecord struct { + hostname string + error error + expires time.Time +} + +func (r ptrRecord) IsExpired(now time.Time) bool { + return now.After(r.expires) +} + +// ReverseLookupCache is a cache for storing and retrieving the results of +// reverse DNS queries. It caches the results of queries regardless of their +// outcome (success or failure). The result is cached for the amount of time +// specified by parameters and not based on the TTL from the PTR record. +type ReverseLookupCache struct { + data map[string]ptrRecord + successTTL, failureTTL time.Duration +} + +// NewReverseLookupCache returns a new cache. 
+func NewReverseLookupCache(successTTL, failureTTL time.Duration) *ReverseLookupCache { + c := &ReverseLookupCache{ + data: map[string]ptrRecord{}, + successTTL: successTTL, + failureTTL: failureTTL, + } + + return c +} + +// Cleanup removes expired entries from the cache. +func (c *ReverseLookupCache) Cleanup() { + now := time.Now() + for k, ptr := range c.data { + if ptr.IsExpired(now) { + delete(c.data, k) + } + } +} + +// Lookup performs a reverse lookup on the given IP address. A cached result +// will be returned if it is contained in the cache, otherwise a lookup is +// performed. +func (c ReverseLookupCache) Lookup(ip net.IP) (string, error) { + // Go doesn't expose a lookup method that accepts net.IP so + // unfortunately we must convert the IP to a string. + ipStr := ip.String() + + // XXX: This could be implemented using common.Cache with a separate + // cleanup thread. + c.Cleanup() + + // Check the cache. + now := time.Now() + if ptr, found := c.data[ipStr]; found && !ptr.IsExpired(now) { + return ptr.hostname, ptr.error + } + + // Do a new lookup. + names, err := net.LookupAddr(ipStr) + now = time.Now() + + var ptr ptrRecord + switch { + case err != nil: + ptr.expires = now.Add(c.failureTTL) + ptr.error = err + case len(names) == 0: + ptr.expires = now.Add(c.failureTTL) + ptr.error = fmt.Errorf("empty dns response") + default: + ptr.expires = now.Add(c.successTTL) + ptr.hostname = names[0] + } + + c.data[ipStr] = ptr + return ptr.hostname, ptr.error +} + +// etldPlusOne returns the effective top-level domain plus one domain for the +// given hostname. +func etldPlusOne(hostname string) (string, error) { + if hostname == "" { + return "", nil + } + + trimmed := false + if hostname[len(hostname)-1] == '.' { + hostname = hostname[:len(hostname)-1] + trimmed = true + } + + domain, err := publicsuffix.EffectiveTLDPlusOne(hostname) + if err != nil { + return "", err + } + + if trimmed { + return domain + ".", nil + } + return domain, nil +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/socket/socket.go b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/socket.go new file mode 100644 index 00000000..4d23158d --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/socket.go @@ -0,0 +1,301 @@ +// +build linux + +package socket + +import ( + "fmt" + "net" + "os" + "path/filepath" + "sync/atomic" + "syscall" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/metricbeat/mb/parse" + "github.com/elastic/beats/metricbeat/module/system" + "github.com/elastic/gosigar/sys/linux" + + "github.com/pkg/errors" +) + +var ( + debugSelector = "system.socket" + debugf = logp.MakeDebug(debugSelector) +) + +func init() { + if err := mb.Registry.AddMetricSet("system", "socket", New, parse.EmptyHostParser); err != nil { + panic(err) + } +} + +type MetricSet struct { + mb.BaseMetricSet + readBuffer []byte + seq uint32 + ptable *ProcTable + euid int + previousConns hashSet + currentConns hashSet + reverseLookup *ReverseLookupCache + listeners *ListenerTable + users UserCache +} + +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + c := defaultConfig + if err := base.Module().UnpackConfig(&c); err != nil { + return nil, err + } + + systemModule, ok := base.Module().(*system.Module) + if !ok { + return nil, errors.New("unexpected module type") + } + + ptable, err := NewProcTable(filepath.Join(systemModule.HostFS, "/proc")) + if 
err != nil { + return nil, err + } + if os.Geteuid() != 0 { + logp.Info("socket process info will only be available for " + + "metricbeat because the process is running as a non-root user") + } + + m := &MetricSet{ + BaseMetricSet: base, + readBuffer: make([]byte, os.Getpagesize()), + ptable: ptable, + euid: os.Geteuid(), + previousConns: hashSet{}, + currentConns: hashSet{}, + listeners: NewListenerTable(), + users: NewUserCache(), + } + + if c.ReverseLookup.IsEnabled() { + var successTTL, failureTTL = defSuccessTTL, defFailureTTL + if c.ReverseLookup.SuccessTTL != 0 { + successTTL = c.ReverseLookup.SuccessTTL + } + if c.ReverseLookup.FailureTTL != 0 { + successTTL = c.ReverseLookup.FailureTTL + } + debugf("enabled reverse DNS lookup with cache TTL of %v/%v", + successTTL, failureTTL) + m.reverseLookup = NewReverseLookupCache(successTTL, failureTTL) + } + + return m, nil +} + +func (m *MetricSet) Fetch() ([]common.MapStr, error) { + // Refresh inode to process mapping (must be root). + if err := m.ptable.Refresh(); err != nil { + debugf("process table refresh had failures: %v", err) + } + + // Send request over netlink and parse responses. + req := linux.NewInetDiagReq() + req.Header.Seq = atomic.AddUint32(&m.seq, 1) + sockets, err := linux.NetlinkInetDiagWithBuf(req, m.readBuffer, nil) + if err != nil { + return nil, errors.Wrap(err, "failed requesting socket dump") + } + debugf("netlink returned %d sockets", len(sockets)) + + // Filter sockets that were known during the previous poll. + sockets = m.filterAndRememberSockets(sockets) + + // Enrich sockets with direction/pid/process/user/hostname and convert to MapStr. + rtn := make([]common.MapStr, 0, len(sockets)) + for _, s := range sockets { + c := newConnection(s) + m.enrichConnectionData(c) + rtn = append(rtn, c.ToMapStr()) + } + + // Set the "previous" connections set to the "current" connections. + tmp := m.previousConns + m.previousConns = m.currentConns + m.currentConns = tmp.Reset() + + // Reset the listeners for the next iteration. + m.listeners.Reset() + + return rtn, nil +} + +// filterAndRememberSockets filters sockets to remove sockets that were seen +// during the last poll. It stores all of the sockets it sees for the next +// poll. +func (m *MetricSet) filterAndRememberSockets(sockets ...[]*linux.InetDiagMsg) []*linux.InetDiagMsg { + var newSockets []*linux.InetDiagMsg + for _, list := range sockets { + for _, socket := range list { + // Register all listening sockets. + if socket.DstPort() == 0 { + m.listeners.Put(uint8(syscall.IPPROTO_TCP), socket.SrcIP(), socket.SrcPort()) + } + + // Filter known sockets. + if m.isNewSocket(socket) { + if logp.IsDebug(debugSelector) { + debugf("found new socket %v:%v -> %v:%v with state=%v, inode=%v, hash-id=%d", + socket.SrcIP(), socket.SrcPort(), + socket.DstIP(), socket.DstPort(), + linux.TCPState(socket.State), socket.Inode, socket.FastHash()) + } + newSockets = append(newSockets, socket) + } + } + } + return newSockets +} + +// isNewSocket returns true if the socket is new since the last poll. +func (m *MetricSet) isNewSocket(diag *linux.InetDiagMsg) bool { + // Don't use the socket's inode for deduplication because once the socket + // is closing the inode goes to 0. + key := diag.FastHash() + m.currentConns.Add(key) + return !m.previousConns.Contains(key) +} + +// enrichConnectionData enriches the connection with username, direction, +// hostname of the remote IP (if enabled), eTLD + 1 of the hostname, and the +// process owning the socket. 
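One note on the TTL handling in `New` above: the second override branch assigns `c.ReverseLookup.FailureTTL` to `successTTL`, so a configured `failure_ttl` never reaches the cache and instead clobbers the success TTL. The intended logic is presumably:

[source,go]
----
if c.ReverseLookup.SuccessTTL != 0 {
	successTTL = c.ReverseLookup.SuccessTTL
}
if c.ReverseLookup.FailureTTL != 0 {
	failureTTL = c.ReverseLookup.FailureTTL // was: successTTL = c.ReverseLookup.FailureTTL
}
m.reverseLookup = NewReverseLookupCache(successTTL, failureTTL)
----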
+func (m *MetricSet) enrichConnectionData(c *connection) { + c.Username = m.users.LookupUID(int(c.UID)) + + // Determine direction (incoming, outgoing, or listening). + c.Direction = m.listeners.Direction(uint8(syscall.IPPROTO_TCP), + c.LocalIP, c.LocalPort, c.RemoteIP, c.RemotePort) + + // Reverse DNS lookup on the remote IP. + if m.reverseLookup != nil && c.Direction != Listening { + hostname, err := m.reverseLookup.Lookup(c.RemoteIP) + if err != nil { + c.DestHostError = err + } else { + c.DestHost = hostname + c.DestHostETLDPlusOne, _ = etldPlusOne(hostname) + } + } + + // Add process info by finding the process that holds the socket's inode. + if proc := m.ptable.ProcessBySocketInode(c.Inode); proc != nil { + c.PID = proc.PID + c.Exe = proc.Executable + c.Command = proc.Command + c.CmdLine = proc.CmdLine + } else if m.euid == 0 { + if c.Inode == 0 { + c.ProcessError = fmt.Errorf("process has exited. inode=%v, tcp_state=%v", + c.Inode, c.State) + } else { + c.ProcessError = fmt.Errorf("process not found. inode=%v, tcp_state=%v", + c.Inode, c.State) + } + } +} + +type connection struct { + Family linux.AddressFamily + LocalIP net.IP + LocalPort int + RemoteIP net.IP + RemotePort int + + State linux.TCPState + Direction Direction + + DestHost string // Reverse lookup of dest IP. + DestHostETLDPlusOne string + DestHostError error // Resolver error. + + // Process identifiers. + Inode uint32 // Inode of the socket. + PID int // PID of the socket owner. + Exe string // Absolute path to the executable. + Command string // Command + CmdLine string // Full command line with arguments. + ProcessError error // Reason process info is unavailable. + + // User identifiers. + UID uint32 // UID of the socket owner. + Username string // Username of the socket. 
+} + +func newConnection(diag *linux.InetDiagMsg) *connection { + return &connection{ + Family: linux.AddressFamily(diag.Family), + State: linux.TCPState(diag.State), + LocalIP: diag.SrcIP(), + LocalPort: diag.SrcPort(), + RemoteIP: diag.DstIP(), + RemotePort: diag.DstPort(), + Inode: diag.Inode, + UID: diag.UID, + PID: -1, + } +} + +func (c *connection) ToMapStr() common.MapStr { + evt := common.MapStr{ + "family": c.Family.String(), + "local": common.MapStr{ + "ip": c.LocalIP.String(), + "port": c.LocalPort, + }, + "user": common.MapStr{ + "id": c.UID, + }, + "direction": c.Direction.String(), + } + + if c.Username != "" { + evt.Put("user.name", c.Username) + } + + if c.ProcessError != nil { + evt.Put("process.error", c.ProcessError.Error()) + } else { + process := common.MapStr{"pid": c.PID} + evt["process"] = process + + if c.PID > 0 { + addOptionalString(process, "exe", c.Exe) + addOptionalString(process, "command", c.Command) + addOptionalString(process, "cmdline", c.CmdLine) + } else if c.PID == 0 { + process["command"] = "kernel" + } + } + + if c.RemotePort != 0 { + remote := common.MapStr{ + "ip": c.RemoteIP.String(), + "port": c.RemotePort, + } + evt["remote"] = remote + + if c.DestHostError != nil { + remote["host_error"] = c.DestHostError.Error() + } else { + addOptionalString(remote, "host", c.DestHost) + addOptionalString(remote, "etld_plus_one", c.DestHostETLDPlusOne) + } + } + + return evt +} + +func addOptionalString(m common.MapStr, key, value string) { + if value == "" { + return + } + m[key] = value +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/socket/socket_test.go b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/socket_test.go new file mode 100644 index 00000000..9e57e751 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/socket_test.go @@ -0,0 +1,107 @@ +// +build linux + +package socket + +import ( + "net" + "os" + "strconv" + "strings" + "testing" + + "github.com/elastic/beats/libbeat/common" + mbtest "github.com/elastic/beats/metricbeat/mb/testing" + + "github.com/stretchr/testify/assert" +) + +func TestData(t *testing.T) { + ln, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + f := mbtest.NewEventsFetcher(t, getConfig()) + + if err = mbtest.WriteEvents(f, t); err != nil { + t.Fatal("write", err) + } +} + +func TestFetch(t *testing.T) { + ln, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + addr := ln.Addr().String() + i := strings.LastIndex(addr, ":") + listenerPort, err := strconv.Atoi(addr[i+1:]) + if err != nil { + t.Fatal("failed to get port from addr", addr) + } + + f := mbtest.NewEventsFetcher(t, getConfig()) + events, err := f.Fetch() + if err != nil { + t.Fatal("fetch", err) + } + + var found bool + for _, evt := range events { + port, ok := getRequiredValue("local.port", evt, t).(int) + if !ok { + t.Fatal("local.port is not an int") + } + if port != listenerPort { + continue + } + + pid, ok := getRequiredValue("process.pid", evt, t).(int) + if !ok { + t.Fatal("proess.pid is not a int") + } + assert.Equal(t, os.Getpid(), pid) + + uid, ok := getRequiredValue("user.id", evt, t).(uint32) + if !ok { + t.Fatal("user.id is not an uint32") + } + assert.EqualValues(t, os.Geteuid(), uid) + + dir, ok := getRequiredValue("direction", evt, t).(string) + if !ok { + t.Fatal("direction is not a string") + } + assert.Equal(t, "listening", dir) + + _ = getRequiredValue("process.cmdline", evt, t).(string) + _ = 
getRequiredValue("process.command", evt, t).(string) + _ = getRequiredValue("process.exe", evt, t).(string) + + found = true + break + } + + assert.True(t, found, "listener not found") +} + +func getRequiredValue(key string, m common.MapStr, t testing.TB) interface{} { + v, err := m.GetValue(key) + if err != nil { + t.Fatal(err) + } + if v == nil { + t.Fatalf("key %v not found in %v", key, m) + } + return v +} + +func getConfig() map[string]interface{} { + return map[string]interface{}{ + "module": "system", + "metricsets": []string{"socket"}, + } +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/socket/user_cache.go b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/user_cache.go new file mode 100644 index 00000000..729fef4f --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/user_cache.go @@ -0,0 +1,32 @@ +package socket + +import ( + "os/user" + "strconv" +) + +// UserCache is a cache of UID to username. +type UserCache map[int]string + +// NewUserCache returns a new UserCache. +func NewUserCache() UserCache { + return map[int]string{0: "root"} +} + +// LookupUID looks up a UID and returns the username associated with it. If +// no username could be found an empty string is returned. The value will be +// cached forever. +func (c UserCache) LookupUID(uid int) string { + if username, found := c[uid]; found { + return username + } + + // Cache the value (even on error). + username, err := user.LookupId(strconv.Itoa(uid)) + if err != nil { + c[uid] = "" + return "" + } + c[uid] = username.Name + return username.Name +} diff --git a/vendor/github.com/elastic/beats/metricbeat/scripts/create_metricset.py b/vendor/github.com/elastic/beats/metricbeat/scripts/create_metricset.py index 2ad710d6..1f8425eb 100644 --- a/vendor/github.com/elastic/beats/metricbeat/scripts/create_metricset.py +++ b/vendor/github.com/elastic/beats/metricbeat/scripts/create_metricset.py @@ -12,14 +12,14 @@ def generate_metricset(base_path, metricbeat_path, module, metricset): meta_path = metricset_path + "/_meta" if os.path.isdir(metricset_path): - print "MericSet already exists. Skipping creating metricset " + metricset + ".\n" + print("Metricset already exists. Skipping creating metricset {}" + .format(metricset)) return os.makedirs(meta_path) templates = metricbeat_path + "/scripts/module/metricset/" - content = load_file(templates + "metricset.go.tmpl", module, metricset) with open(metricset_path + "/" + metricset + ".go", "w") as f: f.write(content) @@ -36,7 +36,7 @@ def generate_metricset(base_path, metricbeat_path, module, metricset): with open(meta_path + "/data.json", "w") as f: f.write(content) - print "Metricset " + metricset + " created." + print("Metricset {} created.".format(metricset)) def generate_module(base_path, metricbeat_path, module, metricset): @@ -45,7 +45,8 @@ def generate_module(base_path, metricbeat_path, module, metricset): meta_path = module_path + "/_meta" if os.path.isdir(module_path): - print "Module already exists. Skipping creating module " + module + ".\n" + print("Module already exists. Skipping creating module {}" + .format(module)) return os.makedirs(meta_path) @@ -68,7 +69,7 @@ def generate_module(base_path, metricbeat_path, module, metricset): with open(module_path + "/doc.go", "w") as f: f.write(content) - print "Module " + module + " created." 
+ print("Module {} created.".format(module)) def load_file(file, module, metricset): @@ -76,7 +77,8 @@ def load_file(file, module, metricset): with open(file) as f: content = f.read() - return content.replace("{module}", module).replace("{metricset}", metricset) + return content.replace("{module}", module).replace("{metricset}", + metricset) if __name__ == "__main__": @@ -85,7 +87,8 @@ if __name__ == "__main__": parser.add_argument("--metricset", help="Metricset name") parser.add_argument("--path", help="Beat path") - parser.add_argument("--es_beats", help="The path to the general beats folder") + parser.add_argument("--es_beats", + help="The path to the general beats folder") args = parser.parse_args() @@ -106,4 +109,5 @@ if __name__ == "__main__": path = os.path.abspath(args.path) metricbeat_path = os.path.abspath(args.es_beats + "/metricbeat") - generate_metricset(path, metricbeat_path, args.module.lower(), args.metricset.lower()) + generate_metricset(path, metricbeat_path, args.module.lower(), + args.metricset.lower()) diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_config.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_config.py new file mode 100644 index 00000000..a74bcb66 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_config.py @@ -0,0 +1,69 @@ +import os +import metricbeat +import unittest +from nose.plugins.attrib import attr +import urllib2 +import time + + +class ConfigTest(metricbeat.BaseTest): + @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test") + @attr('integration') + def test_compare_config(self): + """ + Compare full and short config output + """ + + # Copy over full and normal config + + self.copy_files(["metricbeat.yml", "metricbeat.full.yml"], + source_dir="../../", + target_dir=".") + + proc = self.start_beat(config="metricbeat.yml", output="short.log", + extra_args=["-E", "output.elasticsearch.hosts=['" + self.get_host() + "']"]) + time.sleep(1) + proc.check_kill_and_wait() + + + proc = self.start_beat(config="metricbeat.full.yml", output="full.log", + extra_args=["-E", "output.elasticsearch.hosts=['" + self.get_host() + "']"]) + time.sleep(1) + proc.check_kill_and_wait() + + # Fetch first 27 lines + # Remove timestamp + + shortLog = [] + with open(os.path.join(self.working_dir, "short.log"), "r") as f: + for i in range(27): + # Remove 27 chars of timestamp + shortLog.append(f.next()[27:]) + + fullLog = [] + with open(os.path.join(self.working_dir, "full.log"), "r") as f: + for i in range(27): + # Remove 27 chars of timestamp + fullLog.append(f.next()[27:]) + + same = True + + for i in range(27): + shortLine = shortLog[i] + fullLine = fullLog[i] + + if shortLine not in fullLog: + print shortLine + print fullLine + same = False + + if fullLine not in shortLog: + print shortLine + print fullLine + same = False + + assert same == True + + + def get_host(self): + return 'http://' + os.getenv('ELASTICSEARCH_HOST', 'localhost') + ':' + os.getenv('ELASTICSEARCH_PORT', '9200') diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_docker.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_docker.py index 9be39ad5..69019b0a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_docker.py +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_docker.py @@ -14,7 +14,7 @@ class Test(metricbeat.BaseTest): "name": "docker", "metricsets": ["container"], "hosts": ["unix:///var/run/docker.sock"], - "period": "1s", + "period": 
"10s", }]) proc = self.start_beat() @@ -40,7 +40,7 @@ class Test(metricbeat.BaseTest): "name": "docker", "metricsets": ["cpu"], "hosts": ["unix:///var/run/docker.sock"], - "period": "1s" + "period": "10s" }]) proc = self.start_beat() @@ -70,7 +70,7 @@ class Test(metricbeat.BaseTest): "name": "docker", "metricsets": ["diskio"], "hosts": ["unix:///var/run/docker.sock"], - "period": "1s" + "period": "10s" }]) proc = self.start_beat() @@ -97,7 +97,7 @@ class Test(metricbeat.BaseTest): "name": "docker", "metricsets": ["info"], "hosts": ["unix:///var/run/docker.sock"], - "period": "1s" + "period": "10s" }]) proc = self.start_beat() @@ -122,7 +122,7 @@ class Test(metricbeat.BaseTest): "name": "docker", "metricsets": ["memory"], "hosts": ["unix:///var/run/docker.sock"], - "period": "1s" + "period": "10s" }]) proc = self.start_beat() @@ -148,7 +148,7 @@ class Test(metricbeat.BaseTest): "name": "docker", "metricsets": ["network"], "hosts": ["unix:///var/run/docker.sock"], - "period": "1s" + "period": "10s" }]) proc = self.start_beat() @@ -157,6 +157,7 @@ class Test(metricbeat.BaseTest): # Ensure no errors or warnings exist in the log. log = self.get_log() + self.assertNotRegexpMatches(log.replace("WARN EXPERIMENTAL", ""), "ERR|WARN") output = self.read_output_json() diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_processors.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_processors.py index e5b0202d..371672c7 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_processors.py +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_processors.py @@ -39,7 +39,7 @@ class TestProcessors(metricbeat.BaseTest): cpu = evt["system"]["cpu"] print(cpu.keys()) self.assertItemsEqual(self.de_dot([ - "system", "user", "softirq", "iowait", + "system", "cores", "user", "softirq", "iowait", "idle", "irq", "steal", "nice" ]), cpu.keys()) diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_prometheus.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_prometheus.py new file mode 100644 index 00000000..b9b0fbcf --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_prometheus.py @@ -0,0 +1,38 @@ +import os +import metricbeat +import unittest +from nose.plugins.attrib import attr + +PROMETHEUS_FIELDS = metricbeat.COMMON_FIELDS + ["prometheus"] + +class Test(metricbeat.BaseTest): + @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test") + def test_stats(self): + """ + prometheus stats test + """ + self.render_config_template(modules=[{ + "name": "prometheus", + "metricsets": ["stats"], + "hosts": self.get_hosts(), + "period": "5s" + }]) + proc = self.start_beat() + self.wait_until(lambda: self.output_lines() > 0) + proc.check_kill_and_wait() + + # Ensure no errors or warnings exist in the log. 
+ log = self.get_log() + self.assertNotRegexpMatches(log.replace("WARN EXPERIMENTAL", ""), "ERR|WARN") + + output = self.read_output_json() + self.assertEqual(len(output), 1) + evt = output[0] + + self.assertItemsEqual(self.de_dot(PROMETHEUS_FIELDS), evt.keys(), evt) + + self.assert_fields_are_documented(evt) + + def get_hosts(self): + return ["http://" + os.getenv('PROMETHEUS_HOST', 'localhost') + ':' + + os.getenv('PROMETHEUS_PORT', '9090')] diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_system.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_system.py index e9910f3f..25400d74 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_system.py +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_system.py @@ -5,10 +5,10 @@ import metricbeat import getpass import os -SYSTEM_CPU_FIELDS = ["idle.pct", "iowait.pct", "irq.pct", "nice.pct", +SYSTEM_CPU_FIELDS = ["cores", "idle.pct", "iowait.pct", "irq.pct", "nice.pct", "softirq.pct", "steal.pct", "system.pct", "user.pct"] -SYSTEM_CPU_FIELDS_ALL = ["idle.pct", "idle.ticks", "iowait.pct", "iowait.ticks", "irq.pct", "irq.ticks", "nice.pct", "nice.ticks", +SYSTEM_CPU_FIELDS_ALL = ["cores", "idle.pct", "idle.ticks", "iowait.pct", "iowait.ticks", "irq.pct", "irq.ticks", "nice.pct", "nice.ticks", "softirq.pct", "softirq.ticks", "steal.pct", "steal.ticks", "system.pct", "system.ticks", "user.pct", "user.ticks"] SYSTEM_LOAD_FIELDS = ["1", "5", "15", "norm.1", "norm.5", "norm.15"] diff --git a/vendor/github.com/elastic/beats/packetbeat/Dockerfile b/vendor/github.com/elastic/beats/packetbeat/Dockerfile index 3c128792..9716f7ff 100644 --- a/vendor/github.com/elastic/beats/packetbeat/Dockerfile +++ b/vendor/github.com/elastic/beats/packetbeat/Dockerfile @@ -1,5 +1,5 @@ # Beats dockerfile used for testing -FROM golang:1.7.1 +FROM golang:1.7.4 MAINTAINER Nicolas Ruflin RUN set -x && \ diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/dashboard/Packetbeat-Cassandra.json b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/dashboard/Packetbeat-Cassandra.json index 1b718def..dbdad961 100644 --- a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/dashboard/Packetbeat-Cassandra.json +++ b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/dashboard/Packetbeat-Cassandra.json @@ -1,13 +1,13 @@ { - "hits": 0, - "timeRestore": false, - "description": "", - "title": "Packetbeat Cassandra", - "uiStateJSON": "{\"P-10\":{\"vis\":{\"legendOpen\":false}},\"P-17\":{\"vis\":{\"legendOpen\":false}},\"P-18\":{\"vis\":{\"legendOpen\":false}}}", - "panelsJSON": 
"[{\"col\":10,\"id\":\"Cassandra-ResponseKeyspace\",\"panelIndex\":3,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Cassandra-ResponseType\",\"panelIndex\":4,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-ResponseTime\",\"panelIndex\":9,\"row\":5,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Cassandra-RequestCount\",\"panelIndex\":10,\"row\":1,\"size_x\":9,\"size_y\":2,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Cassandra-Ops\",\"panelIndex\":11,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-RequestCountStackByType\",\"panelIndex\":15,\"row\":7,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-ResponseCountStackByType\",\"panelIndex\":16,\"row\":9,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-RequestCountByType\",\"panelIndex\":17,\"row\":11,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Cassandra-ResponseCountByType\",\"panelIndex\":18,\"row\":11,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Navigation\",\"panelIndex\":19,\"row\":1,\"size_x\":3,\"size_y\":4,\"type\":\"visualization\"},{\"id\":\"Cassandra-QueryView\",\"type\":\"search\",\"panelIndex\":20,\"size_x\":12,\"size_y\":3,\"col\":1,\"row\":14,\"columns\":[\"cassandra.request.query\",\"cassandra.response.result.rows.meta.keyspace\",\"cassandra.response.result.rows.meta.table\",\"cassandra.response.result.rows.num_rows\"],\"sort\":[\"@timestamp\",\"desc\"]}]", - "optionsJSON": "{\"darkTheme\":false}", - "version": 1, + "hits": 0, + "timeRestore": false, + "description": "", + "title": "Packetbeat Cassandra", + "uiStateJSON": "{\"P-10\":{\"vis\":{\"legendOpen\":false}},\"P-17\":{\"vis\":{\"legendOpen\":false}},\"P-18\":{\"vis\":{\"legendOpen\":false}}}", + "panelsJSON": 
"[{\"col\":10,\"id\":\"Cassandra-ResponseKeyspace\",\"panelIndex\":3,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Cassandra-ResponseType\",\"panelIndex\":4,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-ResponseTime\",\"panelIndex\":9,\"row\":5,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Cassandra-RequestCount\",\"panelIndex\":10,\"row\":1,\"size_x\":9,\"size_y\":2,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Cassandra-Ops\",\"panelIndex\":11,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-RequestCountStackByType\",\"panelIndex\":15,\"row\":7,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-ResponseCountStackByType\",\"panelIndex\":16,\"row\":9,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-RequestCountByType\",\"panelIndex\":17,\"row\":11,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Cassandra-ResponseCountByType\",\"panelIndex\":18,\"row\":11,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Navigation\",\"panelIndex\":19,\"row\":1,\"size_x\":3,\"size_y\":4,\"type\":\"visualization\"},{\"id\":\"Cassandra-QueryView\",\"type\":\"search\",\"panelIndex\":20,\"size_x\":12,\"size_y\":3,\"col\":1,\"row\":14,\"columns\":[\"cassandra.request.query\",\"cassandra.response.result.rows.meta.keyspace\",\"cassandra.response.result.rows.meta.table\",\"cassandra.response.result.rows.num_rows\"],\"sort\":[\"@timestamp\",\"desc\"]}]", + "optionsJSON": "{\"darkTheme\":false}", + "version": 1, "kibanaSavedObjectMeta": { "searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}" } -} +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/search/Cassandra-QueryView.json b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/search/Cassandra-QueryView.json index 6982f891..9536d5b3 100644 --- a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/search/Cassandra-QueryView.json +++ b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/search/Cassandra-QueryView.json @@ -5,7 +5,7 @@ ], "hits": 0, "description": "", - "title": "Cassandra: QueryView", + "title": "Cassandra QueryView", "version": 1, "kibanaSavedObjectMeta": { "searchSourceJSON": "{\"index\":\"packetbeat-*\",\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}},\"filter\":[{\"meta\":{\"negate\":false,\"index\":\"packetbeat-*\",\"key\":\"cassandra.request.headers.op\",\"value\":\"QUERY\",\"disabled\":false,\"alias\":null},\"query\":{\"match\":{\"cassandra.request.headers.op\":{\"query\":\"QUERY\",\"type\":\"phrase\"}}},\"$state\":{\"store\":\"appState\"}},{\"meta\":{\"negate\":true,\"index\":\"packetbeat-*\",\"key\":\"cassandra.response.headers.op\",\"value\":\"ERROR\",\"disabled\":false,\"alias\":null},\"query\":{\"match\":{\"cassandra.response.headers.op\":{\"query\":\"ERROR\",\"type\":\"phrase\"}}},\"$state\":{\"store\":\"appState\"}}],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647}}" diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/search/NFS-errors-search.json b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/search/NFS-errors-search.json new file mode 100644 index 
00000000..f34a6ca3 --- /dev/null +++ b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/search/NFS-errors-search.json @@ -0,0 +1,16 @@ +{ + "sort": [ + "@timestamp", + "desc" + ], + "hits": 0, + "description": "", + "title": "NFS errors search", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"index\":\"packetbeat-*\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[{\"meta\":{\"negate\":false,\"index\":\"packetbeat-*\",\"key\":\"type\",\"value\":\"nfs\",\"disabled\":false,\"alias\":null},\"query\":{\"match\":{\"type\":{\"query\":\"nfs\",\"type\":\"phrase\"}}},\"$state\":{\"store\":\"appState\"}},{\"meta\":{\"negate\":true,\"index\":\"packetbeat-*\",\"key\":\"nfs.status\",\"value\":\"NFSERR_NOENT\",\"disabled\":false,\"alias\":null},\"query\":{\"match\":{\"nfs.status\":{\"query\":\"NFSERR_NOENT\",\"type\":\"phrase\"}}},\"$state\":{\"store\":\"appState\"}},{\"meta\":{\"negate\":true,\"index\":\"packetbeat-*\",\"key\":\"nfs.status\",\"value\":\"NFS_OK\",\"disabled\":false,\"alias\":null},\"query\":{\"match\":{\"nfs.status\":{\"query\":\"NFS_OK\",\"type\":\"phrase\"}}},\"$state\":{\"store\":\"appState\"}}],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647}}" + }, + "columns": [ + "_source" + ] +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-Ops.json b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-Ops.json index 42a39dcd..898c9ad4 100644 --- a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-Ops.json +++ b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-Ops.json @@ -1,10 +1,10 @@ { - "visState": "{\"title\":\"Cassandra: Ops\",\"type\":\"pie\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"isDonut\":true},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"cassandra.request.headers.op\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}},{\"id\":\"3\",\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"cassandra.response.headers.op\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", + "visState": "{\"title\":\"Cassandra Ops\",\"type\":\"pie\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"isDonut\":true,\"legendPosition\":\"right\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"cassandra.request.headers.op\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"cassandra.response.headers.op\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", "description": "", - "title": "Cassandra: Ops", + "title": "Cassandra Ops", "uiStateJSON": "{}", "version": 1, "kibanaSavedObjectMeta": { - "searchSourceJSON": "{\"index\":\"packetbeat-*\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}" + "searchSourceJSON": "{\"index\":\"packetbeat-*\",\"query\":{\"query_string\":{\"query\":\"type:cassandra\",\"analyze_wildcard\":true}},\"filter\":[]}" } } \ No 
newline at end of file diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-RequestCount.json b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-RequestCount.json index 651a3d17..0ef8a4e1 100644 --- a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-RequestCount.json +++ b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-RequestCount.json @@ -1,10 +1,10 @@ { - "visState": "{\"title\":\"Cassandra: RequestCount\",\"type\":\"line\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"showCircles\":true,\"smoothLines\":true,\"interpolate\":\"linear\",\"scale\":\"square root\",\"drawLinesBetweenPoints\":true,\"radiusRatio\":9,\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"yAxis\":{}},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}}],\"listeners\":{}}", + "visState": "{\"title\":\"Cassandra RequestCount\",\"type\":\"line\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"showCircles\":true,\"smoothLines\":true,\"interpolate\":\"linear\",\"scale\":\"square root\",\"drawLinesBetweenPoints\":true,\"radiusRatio\":9,\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"yAxis\":{},\"legendPosition\":\"right\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}}],\"listeners\":{}}", "description": "", - "title": "Cassandra: RequestCount", + "title": "Cassandra RequestCount", "uiStateJSON": "{}", "version": 1, "kibanaSavedObjectMeta": { - "searchSourceJSON": "{\"index\":\"packetbeat-*\",\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}},\"filter\":[]}" + "searchSourceJSON": "{\"index\":\"packetbeat-*\",\"query\":{\"query_string\":{\"query\":\"type:cassandra\",\"analyze_wildcard\":true}},\"filter\":[]}" } } \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-RequestCountByType.json b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-RequestCountByType.json index 99d977b6..c8d9c4cb 100644 --- a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-RequestCountByType.json +++ b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-RequestCountByType.json @@ -1,7 +1,7 @@ { - "visState": "{\"title\":\"Cassandra: 
RequestCountByType\",\"type\":\"line\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"showCircles\":true,\"smoothLines\":true,\"interpolate\":\"linear\",\"scale\":\"log\",\"drawLinesBetweenPoints\":false,\"radiusRatio\":\"13\",\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"yAxis\":{}},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"cassandra.request.headers.op\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\"}},{\"id\":\"4\",\"type\":\"count\",\"schema\":\"radius\",\"params\":{}}],\"listeners\":{}}", + "visState": "{\"title\":\"Cassandra RequestCountByType\",\"type\":\"line\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"showCircles\":true,\"smoothLines\":true,\"interpolate\":\"linear\",\"scale\":\"log\",\"drawLinesBetweenPoints\":false,\"radiusRatio\":\"13\",\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"yAxis\":{},\"legendPosition\":\"right\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"cassandra.request.headers.op\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\"}},{\"id\":\"4\",\"enabled\":true,\"type\":\"count\",\"schema\":\"radius\",\"params\":{}}],\"listeners\":{}}", "description": "", - "title": "Cassandra: RequestCountByType", + "title": "Cassandra RequestCountByType", "uiStateJSON": "{}", "version": 1, "kibanaSavedObjectMeta": { diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-RequestCountStackByType.json b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-RequestCountStackByType.json index b7e55bc3..880b8a6a 100644 --- a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-RequestCountStackByType.json +++ b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-RequestCountStackByType.json @@ -1,10 +1,10 @@ { - "visState": "{\n \"title\": \"Cassandra: RequestCountStackByType\",\n \"type\": \"histogram\",\n \"params\": {\n \"shareYAxis\": true,\n \"addTooltip\": true,\n \"addLegend\": true,\n \"scale\": \"linear\",\n \"mode\": \"stacked\",\n \"times\": [],\n \"addTimeMarker\": false,\n \"defaultYExtents\": false,\n \"setYExtents\": false,\n \"yAxis\": {}\n },\n \"aggs\": [\n {\n \"id\": \"1\",\n \"type\": \"count\",\n \"schema\": \"metric\",\n \"params\": {}\n },\n {\n \"id\": \"2\",\n \"type\": \"date_histogram\",\n \"schema\": \"segment\",\n \"params\": {\n \"field\": \"@timestamp\",\n \"interval\": \"auto\",\n \"customInterval\": \"2h\",\n \"min_doc_count\": 1,\n \"extended_bounds\": {}\n }\n },\n {\n \"id\": \"3\",\n \"type\": \"terms\",\n \"schema\": \"group\",\n \"params\": {\n \"field\": \"cassandra.request.headers.op\",\n \"size\": 5,\n \"order\": \"desc\",\n \"orderBy\": \"1\"\n }\n }\n ],\n \"listeners\": {}\n}", + "visState": "{\"title\":\"Cassandra 
RequestCountStackByType\",\"type\":\"histogram\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"scale\":\"linear\",\"mode\":\"stacked\",\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"yAxis\":{},\"legendPosition\":\"right\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"cassandra.request.headers.op\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", "description": "", - "title": "Cassandra: RequestCountStackByType", + "title": "Cassandra RequestCountStackByType", "uiStateJSON": "{}", "version": 1, "kibanaSavedObjectMeta": { - "searchSourceJSON": "{\n \"index\": \"packetbeat-*\",\n \"query\": {\n \"query_string\": {\n \"query\": \"*\",\n \"analyze_wildcard\": true\n }\n },\n \"filter\": []\n}" + "searchSourceJSON": "{\"index\":\"packetbeat-*\",\"query\":{\"query_string\":{\"query\":\"type:cassandra\",\"analyze_wildcard\":true}},\"filter\":[]}" } } \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseCountByType.json b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseCountByType.json index 50ac2e05..022737ca 100644 --- a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseCountByType.json +++ b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseCountByType.json @@ -1,10 +1,10 @@ { "visState": "{\"title\":\"Cassandra: ResponseCountByType\",\"type\":\"line\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"showCircles\":true,\"smoothLines\":true,\"interpolate\":\"linear\",\"scale\":\"log\",\"drawLinesBetweenPoints\":false,\"radiusRatio\":\"15\",\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"yAxis\":{}},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"cassandra.response.headers.op\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\"}},{\"id\":\"4\",\"type\":\"count\",\"schema\":\"radius\",\"params\":{}}],\"listeners\":{}}", "description": "", - "title": "Cassandra: ResponseCountByType", + "title": "Cassandra ResponseCountByType", "uiStateJSON": "{}", "version": 1, "kibanaSavedObjectMeta": { "searchSourceJSON": "{\"index\":\"packetbeat-*\",\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}},\"filter\":[]}" } -} \ No newline at end of file +} diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseCountStackByType.json b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseCountStackByType.json index 6088dbf8..22d35c6f 100644 --- a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseCountStackByType.json +++ 
b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseCountStackByType.json @@ -1,10 +1,10 @@ { - "visState": "{\n \"title\": \"Cassandra: ResponseCountStackByType\",\n \"type\": \"histogram\",\n \"params\": {\n \"shareYAxis\": true,\n \"addTooltip\": true,\n \"addLegend\": true,\n \"scale\": \"linear\",\n \"mode\": \"stacked\",\n \"times\": [],\n \"addTimeMarker\": false,\n \"defaultYExtents\": false,\n \"setYExtents\": false,\n \"yAxis\": {}\n },\n \"aggs\": [\n {\n \"id\": \"1\",\n \"type\": \"count\",\n \"schema\": \"metric\",\n \"params\": {}\n },\n {\n \"id\": \"2\",\n \"type\": \"date_histogram\",\n \"schema\": \"segment\",\n \"params\": {\n \"field\": \"@timestamp\",\n \"interval\": \"auto\",\n \"customInterval\": \"2h\",\n \"min_doc_count\": 1,\n \"extended_bounds\": {}\n }\n },\n {\n \"id\": \"3\",\n \"type\": \"terms\",\n \"schema\": \"group\",\n \"params\": {\n \"field\": \"cassandra.response.headers.op\",\n \"size\": 5,\n \"order\": \"desc\",\n \"orderBy\": \"1\"\n }\n }\n ],\n \"listeners\": {}\n}", + "visState": "{\"title\":\"Cassandra ResponseCountStackByType\",\"type\":\"histogram\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"scale\":\"linear\",\"mode\":\"stacked\",\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"yAxis\":{},\"legendPosition\":\"right\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"cassandra.response.headers.op\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", "description": "", - "title": "Cassandra: ResponseCountStackByType", + "title": "Cassandra ResponseCountStackByType", "uiStateJSON": "{}", "version": 1, "kibanaSavedObjectMeta": { - "searchSourceJSON": "{\n \"index\": \"packetbeat-*\",\n \"query\": {\n \"query_string\": {\n \"query\": \"*\",\n \"analyze_wildcard\": true\n }\n },\n \"filter\": []\n}" + "searchSourceJSON": "{\"index\":\"packetbeat-*\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}" } } \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseKeyspace.json b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseKeyspace.json index 789d4597..02997230 100644 --- a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseKeyspace.json +++ b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseKeyspace.json @@ -1,7 +1,7 @@ { - "visState": "{\"title\":\"Cassandra: ResponseKeyspace\",\"type\":\"pie\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"isDonut\":true},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"cassandra.response.result.rows.meta.keyspace\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\"}},{\"id\":\"3\",\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"cassandra.response.result.rows.meta.table\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", + "visState": 
"{\"title\":\"Cassandra ResponseKeyspace\",\"type\":\"pie\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"isDonut\":true,\"legendPosition\":\"right\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"cassandra.response.result.rows.meta.keyspace\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"cassandra.response.result.rows.meta.table\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", "description": "", - "title": "Cassandra: ResponseKeyspace", + "title": "Cassandra ResponseKeyspace", "uiStateJSON": "{}", "version": 1, "kibanaSavedObjectMeta": { diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseTime.json b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseTime.json index 194f857e..8ac34feb 100644 --- a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseTime.json +++ b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseTime.json @@ -1,10 +1,10 @@ { - "visState": "{\"title\":\"Cassandra: ResponseTime\",\"type\":\"line\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"showCircles\":true,\"smoothLines\":true,\"interpolate\":\"linear\",\"scale\":\"square root\",\"drawLinesBetweenPoints\":true,\"radiusRatio\":9,\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"yAxis\":{}},\"aggs\":[{\"id\":\"1\",\"type\":\"percentiles\",\"schema\":\"metric\",\"params\":{\"field\":\"responsetime\",\"percents\":[5,25,50,75,95]}},{\"id\":\"2\",\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}}],\"listeners\":{}}", + "visState": "{\"title\":\"Cassandra ResponseTime\",\"type\":\"line\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"showCircles\":true,\"smoothLines\":true,\"interpolate\":\"linear\",\"scale\":\"square root\",\"drawLinesBetweenPoints\":true,\"radiusRatio\":9,\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"yAxis\":{},\"legendPosition\":\"right\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"percentiles\",\"schema\":\"metric\",\"params\":{\"field\":\"responsetime\",\"percents\":[5,25,50,75,95]}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}}],\"listeners\":{}}", "description": "", - "title": "Cassandra: ResponseTime", + "title": "Cassandra ResponseTime", "uiStateJSON": "{}", "version": 1, "kibanaSavedObjectMeta": { - "searchSourceJSON": "{\"index\":\"packetbeat-*\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}" + "searchSourceJSON": "{\"index\":\"packetbeat-*\",\"query\":{\"query_string\":{\"query\":\"type:cassandra\",\"analyze_wildcard\":true}},\"filter\":[]}" } } \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseType.json 
b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseType.json index 6c3156cc..1f63dbdb 100644 --- a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseType.json +++ b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Cassandra-ResponseType.json @@ -1,7 +1,7 @@ { - "visState": "{\"title\":\"Cassandra: ResponseType\",\"type\":\"pie\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"isDonut\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"cassandra.response.result.type\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", + "visState": "{\"title\":\"Cassandra ResponseType\",\"type\":\"pie\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"isDonut\":false,\"legendPosition\":\"right\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"cassandra.response.result.type\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", "description": "", - "title": "Cassandra: ResponseType", + "title": "Cassandra ResponseType", "uiStateJSON": "{}", "version": 1, "kibanaSavedObjectMeta": { diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/NFS-errors.json b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/NFS-errors.json index 041376c9..1ad752d6 100644 --- a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/NFS-errors.json +++ b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/NFS-errors.json @@ -1,10 +1,10 @@ { - "visState": "{\"title\":\"NFS errors\",\"type\":\"area\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"smoothLines\":true,\"scale\":\"linear\",\"interpolate\":\"linear\",\"mode\":\"overlap\",\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"yAxis\":{}},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"nfs.status\",\"exclude\":{\"pattern\":\"NFS_OK|NFSERR_NOENT\"},\"size\":12,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", + "visState": "{\"title\":\"NFS errors\",\"type\":\"area\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"smoothLines\":false,\"scale\":\"linear\",\"interpolate\":\"linear\",\"mode\":\"stacked\",\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"yAxis\":{}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"nfs.status\",\"size\":12,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", "description": "", "title": "NFS errors", "uiStateJSON": 
"{}", "version": 1, - "savedSearchId": "nfs", + "savedSearchId": "NFS-errors-search", "kibanaSavedObjectMeta": { "searchSourceJSON": "{\"filter\":[]}" } diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Navigation.json b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Navigation.json index af168cd4..c2f25a17 100644 --- a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Navigation.json +++ b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/visualization/Navigation.json @@ -1,5 +1,5 @@ { - "visState": "{\"title\":\"Navigation\",\"type\":\"markdown\",\"params\":{\"markdown\":\"### Packetbeat:\\n\\n[Overview](#/dashboard/Packetbeat-Dashboard)\\n\\n[Flows](#/dashboard/Packetbeat-Flows)\\n\\n[Web transactions](#/dashboard/Packetbeat-HTTP)\\n\\n[MySQL performance](#/dashboard/Packetbeat-MySQL-performance)\\n\\n[PostgreSQL performance](#/dashboard/Packetbeat-PgSQL-performance)\\n\\n[MongoDB performance](#/dashboard/Packetbeat-MongoDB-performance)\\n\\n[Thrift-RPC performance](#/dashboard/Packetbeat-Thrift-performance)\\n\\n[NFS transactions](#/dashboard/Packetbeat-NFS)\\n\\n[Cassandra performance](#/dashboard/Packetbeat-Cassandra)\"},\"aggs\":[],\"listeners\":{}}", + "visState": "{\"title\":\"Navigation\",\"type\":\"markdown\",\"params\":{\"markdown\":\"### Packetbeat:\\n\\n[Overview](#/dashboard/Packetbeat-Dashboard)\\n\\n[Flows](#/dashboard/Packetbeat-Flows)\\n\\n[Web transactions](#/dashboard/Packetbeat-HTTP)\\n\\n[MySQL performance](#/dashboard/Packetbeat-MySQL-performance)\\n\\n[PostgreSQL performance](#/dashboard/Packetbeat-PgSQL-performance)\\n\\n[MongoDB performance](#/dashboard/Packetbeat-MongoDB-performance)\\n\\n[Thrift-RPC performance](#/dashboard/Packetbeat-Thrift-performance)\\n\\n[NFS transactions](#/dashboard/Packetbeat-NFS)\\n\\n[Cassandra performance](#/dashboard/Packetbeat-Cassandra)\"},\"aggs\":[],\"listeners\":{}}", "description": "", "title": "Navigation", "uiStateJSON": "{}", diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/gettingstarted.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/gettingstarted.asciidoc index b0956bbc..e11c6717 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/gettingstarted.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/gettingstarted.asciidoc @@ -31,7 +31,7 @@ Redhat/Centos/Fedora, <> for OS X, and <> for Windows). [NOTE] ================================================== -If you use Apt or Yum, you can {libbeat}/setup-repositories.html[install Packetbeat from our repositories] to update to the newest version more easily. +If you use Apt or Yum, you can <> to update to the newest version more easily. See our https://www.elastic.co/downloads/beats/packetbeat[download page] for other installation options, such as 32-bit images. ================================================== @@ -100,7 +100,8 @@ more information about these options, see <>. To configure Packetbeat, you edit the configuration file. For rpm and deb, you'll find the configuration file at `/etc/packetbeat/packetbeat.yml`. For mac and win, look in -the archive that you just extracted. +the archive that you just extracted. There’s also a full example configuration file called +`packetbeat.full.yml` that shows all non-deprecated options. 
To configure Packetbeat: @@ -204,7 +205,11 @@ include::../../libbeat/docs/shared-template-load.asciidoc[] [[packetbeat-starting]] === Step 4: Starting Packetbeat -Run Packetbeat by issuing the following command: +Run Packetbeat by issuing the command that is appropriate for your platform. + +NOTE: If you use an init.d script to start Packetbeat on deb or rpm, you can't +specify command line flags (see <>). To specify flags, +start Packetbeat in the foreground. *deb:* diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-created-indexes.png b/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-created-indexes.png index 17949915..343675b0 100644 Binary files a/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-created-indexes.png and b/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-created-indexes.png differ diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-navigation-vis.png b/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-navigation-vis.png index d80f3505..71b87ffe 100644 Binary files a/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-navigation-vis.png and b/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-navigation-vis.png differ diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/index.asciidoc index 3adf07e6..e99aad73 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/index.asciidoc @@ -1,6 +1,6 @@ = Packetbeat Reference -include::./version.asciidoc[] +include::../../libbeat/docs/version.asciidoc[] :libbeat: http://www.elastic.co/guide/en/beats/libbeat/{doc-branch} :packetbeat: http://www.elastic.co/guide/en/beats/packetbeat/{doc-branch} @@ -9,7 +9,7 @@ include::./version.asciidoc[] :winlogbeat: http://www.elastic.co/guide/en/beats/winlogbeat/{doc-branch} :elasticsearch: https://www.elastic.co/guide/en/elasticsearch/reference/{doc-branch} :logstashdoc: https://www.elastic.co/guide/en/logstash/{doc-branch} -:securitydoc: https://www.elastic.co/guide/en/x-pack/5.0 +:securitydoc: https://www.elastic.co/guide/en/x-pack/5.2 :kibanadoc: https://www.elastic.co/guide/en/kibana/{doc-branch} :plugindoc: https://www.elastic.co/guide/en/elasticsearch/plugins/{doc-branch} :version: {stack-version} @@ -21,10 +21,13 @@ include::./overview.asciidoc[] include::./gettingstarted.asciidoc[] + include::./command-line.asciidoc[] include::../../libbeat/docs/shared-directory-layout.asciidoc[] +include::../../libbeat/docs/repositories.asciidoc[] + include::./upgrading.asciidoc[] include::./configuring-howto.asciidoc[] diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/securing-packetbeat.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/securing-packetbeat.asciidoc index c0da3e5a..c40a0483 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/securing-packetbeat.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/securing-packetbeat.asciidoc @@ -9,12 +9,16 @@ The following topics describe how to secure communication between Packetbeat and * <> * <> +//sets block macro for https.asciidoc included in next section + -- [[securing-communication-elasticsearch]] == Securing Communication With Elasticsearch include::../../libbeat/docs/https.asciidoc[] +//sets block macro for shared-ssl-logstash-config.asciidoc included in next section + [[configuring-ssl-logstash]] == Securing Communication With Logstash by 
Using SSL include::../../libbeat/docs/shared-ssl-logstash-config.asciidoc[] diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/troubleshooting.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/troubleshooting.asciidoc index 2dbb7cee..132ede8e 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/troubleshooting.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/troubleshooting.asciidoc @@ -11,7 +11,7 @@ following tips: * <> * <> - +//sets block macro for getting-help.asciidoc included in next section -- @@ -20,11 +20,15 @@ following tips: include::../../libbeat/docs/getting-help.asciidoc[] +//sets block macro for debugging.asciidoc included in next section + [[enable-packetbeat-debugging]] == Debugging include::../../libbeat/docs/debugging.asciidoc[] +//sets block macro for recording-trace content included in next section + [[recording-trace]] == Recording a Trace diff --git a/vendor/github.com/elastic/beats/packetbeat/packetbeat.template-es2x.json b/vendor/github.com/elastic/beats/packetbeat/packetbeat.template-es2x.json index 42b344a4..ff29f9dc 100644 --- a/vendor/github.com/elastic/beats/packetbeat/packetbeat.template-es2x.json +++ b/vendor/github.com/elastic/beats/packetbeat/packetbeat.template-es2x.json @@ -7,7 +7,7 @@ } }, "_meta": { - "version": "5.1.2" + "version": "5.2.2" }, "dynamic_templates": [ { diff --git a/vendor/github.com/elastic/beats/packetbeat/packetbeat.template.json b/vendor/github.com/elastic/beats/packetbeat/packetbeat.template.json index 543d0ca0..4585a8c8 100644 --- a/vendor/github.com/elastic/beats/packetbeat/packetbeat.template.json +++ b/vendor/github.com/elastic/beats/packetbeat/packetbeat.template.json @@ -5,7 +5,7 @@ "norms": false }, "_meta": { - "version": "5.1.2" + "version": "5.2.2" }, "dynamic_templates": [ { @@ -1359,6 +1359,7 @@ }, "order": 0, "settings": { + "index.mapping.total_fields.limit": 10000, "index.refresh_interval": "5s" }, "template": "packetbeat-*" diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/cassandra/internal/gocql/marshal.go b/vendor/github.com/elastic/beats/packetbeat/protos/cassandra/internal/gocql/marshal.go index a8d3f460..9a7d8ff8 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/cassandra/internal/gocql/marshal.go +++ b/vendor/github.com/elastic/beats/packetbeat/protos/cassandra/internal/gocql/marshal.go @@ -12,9 +12,10 @@ import ( "time" "errors" + "strings" + "github.com/elastic/beats/libbeat/logp" "gopkg.in/inf.v0" - "strings" ) // TypeInfo describes a Cassandra specific data type. @@ -530,19 +531,12 @@ func FrameOpFromString(s string) (FrameOp, error) { return op, nil } -func (f *FrameOp) Unpack(in interface{}) error { - s, ok := in.(string) - if !ok { - return errors.New("expected string") - } - +func (f *FrameOp) Unpack(s string) error { op, err := FrameOpFromString(s) - if err != nil { - return err + if err == nil { + *f = op } - - *f = op - return nil + return err } const ( diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/dns/dns_test.go b/vendor/github.com/elastic/beats/packetbeat/protos/dns/dns_test.go index 57b3dbbf..16d0364a 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/dns/dns_test.go +++ b/vendor/github.com/elastic/beats/packetbeat/protos/dns/dns_test.go @@ -274,7 +274,7 @@ func TestRRsToMapStrsWithOPTRecord(t *testing.T) { r.Preference = 10 r.Mx = "mx.miek.nl." 
- // The OPT record is a psuedo-record so it doesn't become a real record + // The OPT record is a pseudo-record so it doesn't become a real record // in our conversion, and there will be 1 entry instead of 2. mapStrs := rrsToMapStrs([]mkdns.RR{o, r}) assert.Len(t, mapStrs, 1) diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/http/http.go b/vendor/github.com/elastic/beats/packetbeat/protos/http/http.go index b28e2df2..e12e7f13 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/http/http.go +++ b/vendor/github.com/elastic/beats/packetbeat/protos/http/http.go @@ -542,10 +542,8 @@ func (http *httpPlugin) collectHeaders(m *message) interface{} { if strings.ToLower(name) == "content-length" { continue } - if http.splitCookie { - if name == cookie { - hdrs[name] = splitCookiesHeader(string(value)) - } + if http.splitCookie && name == cookie { + hdrs[name] = splitCookiesHeader(string(value)) } else { hdrs[name] = value } diff --git a/vendor/github.com/elastic/beats/testing/environments/2x.yml b/vendor/github.com/elastic/beats/testing/environments/2x.yml new file mode 100644 index 00000000..9749e644 --- /dev/null +++ b/vendor/github.com/elastic/beats/testing/environments/2x.yml @@ -0,0 +1,26 @@ +# This should test the environment with the latest snapshots +# This is based on base.yml + +version: '2' +services: + elasticsearch: + build: + context: ./docker/elasticsearch + dockerfile: Dockerfile-2x + args: + ELASTICSEARCH_VERSION: 2.4.2 + command: elasticsearch -Des.network.host=0.0.0.0 + + logstash: + build: + context: ./docker/logstash + dockerfile: Dockerfile-2x + args: + LOGSTASH_VERSION: 2.4.1 + + kibana: + build: + context: ./docker/kibana + dockerfile: Dockerfile-4.6 + args: + KIBANA_VERSION: 4.6.3 diff --git a/vendor/github.com/elastic/beats/testing/environments/Dockerfile b/vendor/github.com/elastic/beats/testing/environments/Dockerfile index bf6f60b2..0543985d 100644 --- a/vendor/github.com/elastic/beats/testing/environments/Dockerfile +++ b/vendor/github.com/elastic/beats/testing/environments/Dockerfile @@ -6,5 +6,3 @@ MAINTAINER Nicolas Ruflin RUN apt-get update && \ apt-get install -y curl nano wget zip && \ apt-get clean - - diff --git a/vendor/github.com/elastic/beats/testing/environments/Makefile b/vendor/github.com/elastic/beats/testing/environments/Makefile index 406b3571..555e9bde 100644 --- a/vendor/github.com/elastic/beats/testing/environments/Makefile +++ b/vendor/github.com/elastic/beats/testing/environments/Makefile @@ -1,5 +1,5 @@ ENV?=latest.yml -BASE_COMMAND=docker-compose -f base.yml -f ports.yml -f ${ENV} +BASE_COMMAND=docker-compose -f ${ENV} -f local.yml start: # This is run every time to make sure the environment is up-to-date @@ -8,7 +8,7 @@ start: stop: ${BASE_COMMAND} stop - ${BASE_COMMAND} rm -f + ${BASE_COMMAND} rm up: diff --git a/vendor/github.com/elastic/beats/testing/environments/README.md b/vendor/github.com/elastic/beats/testing/environments/README.md index df72c5ab..e0653b24 100644 --- a/vendor/github.com/elastic/beats/testing/environments/README.md +++ b/vendor/github.com/elastic/beats/testing/environments/README.md @@ -9,9 +9,9 @@ The different environments can be started with the following commands for manual Running the environment chains the following docker-compose files together -* base.yml: Basic environment -* ports.yml: Definition of ports which have to be exposed +* local.yml: Definition of ports which have to be exposed for local testing including kibana * latest.yml: Latest version of elasticsearch, logstash, kibana +* 
snapshot.yml: Snapshot version of elasticsearch, logstash, kibana ## Start / Stop environment @@ -76,7 +76,6 @@ This will run the full testsuite but with latest environments instead of snapsho By default, elasticsearch, logstash and kibana are started. These are available at all time that these environments are used. Running the environment, chains the following docker-compose flies together: -* base.yml: Basic environment * snapshot.yml: Snapshot version of elasticsearch, logstash, kibana * docker-compose.yml: Local beat docker-compose file diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/elasticsearch/Dockerfile-2x b/vendor/github.com/elastic/beats/testing/environments/docker/elasticsearch/Dockerfile-2x new file mode 100644 index 00000000..0aba41f9 --- /dev/null +++ b/vendor/github.com/elastic/beats/testing/environments/docker/elasticsearch/Dockerfile-2x @@ -0,0 +1,45 @@ +FROM java:8-jre + +# grab gosu for easy step-down from root +RUN gpg --keyserver ha.pool.sks-keyservers.net --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 +RUN arch="$(dpkg --print-architecture)" \ + && set -x \ + && curl -o /usr/local/bin/gosu -fSL "https://github.com/tianon/gosu/releases/download/1.3/gosu-$arch" \ + && curl -o /usr/local/bin/gosu.asc -fSL "https://github.com/tianon/gosu/releases/download/1.3/gosu-$arch.asc" \ + && gpg --verify /usr/local/bin/gosu.asc \ + && rm /usr/local/bin/gosu.asc \ + && chmod +x /usr/local/bin/gosu + +RUN apt-key adv --keyserver ha.pool.sks-keyservers.net --recv-keys 46095ACC8548582C1A2699A9D27D666CD88E42B4 + +ARG ELASTICSEARCH_VERSION + +RUN wget https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution/deb/elasticsearch/${ELASTICSEARCH_VERSION}/elasticsearch-${ELASTICSEARCH_VERSION}.deb + +RUN dpkg -i elasticsearch-${ELASTICSEARCH_VERSION}.deb + +ENV PATH /usr/share/elasticsearch/bin:$PATH + +RUN set -ex \ + && for path in \ + /usr/share/elasticsearch/data \ + /usr/share/elasticsearch/logs \ + /usr/share/elasticsearch/config \ + /usr/share/elasticsearch/config/scripts \ + ; do \ + mkdir -p "$path"; \ + chown -R elasticsearch:elasticsearch "$path"; \ + done + +COPY config /usr/share/elasticsearch/config + +VOLUME /usr/share/elasticsearch/data + +COPY docker-entrypoint.sh / + +EXPOSE 9200 9300 + +CMD ["elasticsearch"] + + +ENTRYPOINT ["/docker-entrypoint.sh"] diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/elasticsearch/Dockerfile-snapshot b/vendor/github.com/elastic/beats/testing/environments/docker/elasticsearch/Dockerfile-snapshot index 3211e15e..6fc3384c 100644 --- a/vendor/github.com/elastic/beats/testing/environments/docker/elasticsearch/Dockerfile-snapshot +++ b/vendor/github.com/elastic/beats/testing/environments/docker/elasticsearch/Dockerfile-snapshot @@ -32,6 +32,11 @@ USER elasticsearch # Install xpack #RUN eval ${ES_JAVA_OPTS:-} elasticsearch-plugin install --batch ${XPACK} + + +RUN elasticsearch-plugin install --batch https://staging.elastic.co/5.2.1-f614a4d5/downloads/elasticsearch-plugins/ingest-user-agent/ingest-user-agent-5.2.1.zip +RUN elasticsearch-plugin install --batch https://staging.elastic.co/5.2.1-f614a4d5/downloads/elasticsearch-plugins/ingest-geoip/ingest-geoip-5.2.1.zip + COPY config/elasticsearch.yml config/ COPY config/log4j2.properties config/ COPY bin/es-docker bin/es-docker diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile-4.6 b/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile-4.6 new file mode 100644 
index 00000000..69519206 --- /dev/null +++ b/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile-4.6 @@ -0,0 +1,40 @@ +FROM debian:jessie + +# add our user and group first to make sure their IDs get assigned consistently +RUN groupadd -r kibana && useradd -r -m -g kibana kibana + +RUN apt-get update && apt-get install -y ca-certificates curl --no-install-recommends && rm -rf /var/lib/apt/lists/* + +# grab gosu for easy step-down from root +RUN gpg --keyserver ha.pool.sks-keyservers.net --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 +RUN arch="$(dpkg --print-architecture)" \ + && set -x \ + && curl -o /usr/local/bin/gosu -SL "https://github.com/tianon/gosu/releases/download/1.3/gosu-$arch" \ + && curl -o /usr/local/bin/gosu.asc -SL "https://github.com/tianon/gosu/releases/download/1.3/gosu-$arch.asc" \ + && gpg --verify /usr/local/bin/gosu.asc \ + && rm /usr/local/bin/gosu.asc \ + && chmod +x /usr/local/bin/gosu + +ARG KIBANA_VERSION + +RUN set -x \ + && curl -fSL "https://download.elastic.co/kibana/kibana/kibana-${KIBANA_VERSION}-linux-x86_64.tar.gz" -o kibana.tar.gz \ + && mkdir -p /opt/kibana \ + && tar -xz --strip-components=1 -C /opt/kibana -f kibana.tar.gz \ + && chown -R kibana:kibana /opt/kibana \ + && rm kibana.tar.gz + +ENV PATH /opt/kibana/bin:$PATH + +COPY ./docker-entrypoint.sh / + +RUN gosu kibana kibana plugin --install elastic/sense +RUN gosu kibana kibana plugin --install kibana/timelion +RUN gosu kibana kibana plugin --install kibana/reporting/2.4.0 + +# generate a random key +RUN echo "reporting.encryptionKey: `date +%s | sha256sum | base64 | head -c 32`" >> /opt/kibana/config/kibana.yml + +EXPOSE 5601 +ENTRYPOINT ["/docker-entrypoint.sh"] +CMD ["kibana"] diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile-snapshot b/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile-snapshot index 5443b62c..d05f4739 100644 --- a/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile-snapshot +++ b/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile-snapshot @@ -2,7 +2,7 @@ FROM docker.elastic.co/kibana/kibana-ubuntu-base:latest MAINTAINER Elastic Docker Team -ARG KIBANA_DOWNLOAD_URL=https://staging.elastic.co/5.1.2-429c1ec3/downloads/kibana/kibana-5.1.2-linux-x86_64.tar.gz +ARG KIBANA_DOWNLOAD_URL=https://staging.elastic.co/5.2.1-f614a4d5/downloads/kibana/kibana-5.2.1-linux-x86_64.tar.gz ARG X_PACK_URL EXPOSE 5601 diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/Dockerfile b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/Dockerfile new file mode 100644 index 00000000..9eb9c686 --- /dev/null +++ b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/Dockerfile @@ -0,0 +1,28 @@ +FROM java:8-jre + +ARG LS_DOWNLOAD_URL +ARG LS_VERSION + +ENV URL ${LS_DOWNLOAD_URL}/logstash-${LS_VERSION}.tar.gz +ENV PATH $PATH:/opt/logstash-${LS_VERSION}/bin + +# Cache variable can be set during building to invalidate the build cache with `--build-arg CACHE=$(date +%s) .` +ARG CACHE=1 + +# As all snapshot builds have the same url, the image is cached. 
The date at then can be used to invalidate the image +RUN set -x && \ + cd /opt && \ + wget -qO logstash.tar.gz $URL?${CACHE} && \ + tar xzf logstash.tar.gz + + +COPY logstash.conf.tmpl /logstash.conf.tmpl +COPY docker-entrypoint.sh /entrypoint.sh + +COPY pki /etc/pki + +ENTRYPOINT ["/entrypoint.sh"] + +EXPOSE 5044 5055 + +CMD logstash -f /logstash.conf --log.level=debug --config.debug diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/Dockerfile-2x b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/Dockerfile-2x new file mode 100644 index 00000000..ea8b1682 --- /dev/null +++ b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/Dockerfile-2x @@ -0,0 +1,30 @@ +FROM java:8-jre + +ARG LOGSTASH_VERSION + +ENV DEB_URL https://download.elastic.co/logstash/logstash/packages/debian/logstash-${LOGSTASH_VERSION}_all.deb + +ENV PATH $PATH:/opt/logstash/bin:/opt/logstash/vendor/jruby/bin + +# install logstash +RUN set -x && \ + mkdir -p /var/tmp && \ + wget -qO /var/tmp/logstash.deb $DEB_URL && \ + apt-get update -y && \ + apt-get install -y logrotate git && \ + dpkg -i /var/tmp/logstash.deb && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +RUN plugin install logstash-input-beats + +COPY logstash.conf.tmpl /logstash.conf.tmpl +COPY docker-entrypoint.sh /entrypoint.sh + +COPY pki /etc/pki + +ENTRYPOINT ["/entrypoint.sh"] + +CMD logstash agent -f /logstash.conf + + diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/docker-entrypoint.sh b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/docker-entrypoint.sh index 70db1bb5..f9147a5e 100755 --- a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/docker-entrypoint.sh +++ b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/docker-entrypoint.sh @@ -58,21 +58,12 @@ waitForElasticsearch() { exit 1 } -updateConfigFile_1_5() { - sed -e "s/host .*/host => \"$ES_HOST\"/" /logstash.conf.1.5.tmpl > /logstash.conf -} - -updateConfigFile_2() { - sed -e "s/hosts.*/hosts => [\"$ES_HOST:$ES_PORT\"]/" /logstash.conf.2.tmpl > /logstash.conf +updateConfigFile() { + sed -e "s/hosts.*/hosts => [\"$ES_HOST:$ES_PORT\"]/" /logstash.conf.tmpl > /logstash.conf } # Main -readParams -if [ "$LS_VERSION" == "1.5" ]; then - updateConfigFile_1_5 -else - updateConfigFile_2 -fi +updateConfigFile waitForElasticsearch exec "$@" diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/logstash.conf.tmpl b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/logstash.conf.tmpl new file mode 100644 index 00000000..26ae7751 --- /dev/null +++ b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/logstash.conf.tmpl @@ -0,0 +1,27 @@ +input { + beats { + port => 5044 + ssl => false + } + + beats { + port => 5055 + ssl => true + ssl_certificate => "/etc/pki/tls/certs/logstash.crt" + ssl_key => "/etc/pki/tls/private/logstash.key" + } +} + + +output { + elasticsearch { + hosts => [] + #user => "beats" + #password => "testing" + index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" + document_type => "%{[@metadata][type]}" + } + + # Used for easier debugging + #stdout { codec => rubydebug { metadata => true } } +} diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/sredis/Dockerfile b/vendor/github.com/elastic/beats/testing/environments/docker/sredis/Dockerfile index adbc826c..3b6e9551 100644 --- 
a/vendor/github.com/elastic/beats/testing/environments/docker/sredis/Dockerfile +++ b/vendor/github.com/elastic/beats/testing/environments/docker/sredis/Dockerfile @@ -1,8 +1,6 @@ -FROM debian:8.4 +FROM alpine:edge -RUN apt-get update \ - && apt-get upgrade -y \ - && apt-get install stunnel4 -y +RUN apk add --no-cache stunnel COPY stunnel.conf /etc/stunnel/stunnel.conf COPY pki /etc/pki diff --git a/vendor/github.com/elastic/beats/testing/environments/latest.yml b/vendor/github.com/elastic/beats/testing/environments/latest.yml index 1fc1035f..dde14569 100644 --- a/vendor/github.com/elastic/beats/testing/environments/latest.yml +++ b/vendor/github.com/elastic/beats/testing/environments/latest.yml @@ -4,18 +4,23 @@ version: '2' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:latest + image: docker.elastic.co/elasticsearch/elasticsearch:5.1.1 environment: - - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - - "network.host=" - - "transport.host=127.0.0.1" - - "http.host=0.0.0.0" - - "xpack.security.enabled=false" + - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + - "network.host=" + - "transport.host=127.0.0.1" + - "http.host=0.0.0.0" + - "xpack.security.enabled=false" logstash: build: context: docker/logstash - dockerfile: Dockerfile-latest + dockerfile: Dockerfile + args: + LS_VERSION: 5.1.1 + LS_DOWNLOAD_URL: https://artifacts.elastic.co/downloads/logstash + environment: + - ES_HOST=elasticsearch kibana: - image: docker.elastic.co/kibana/kibana:latest + image: docker.elastic.co/kibana/kibana:5.1.1 diff --git a/vendor/github.com/elastic/beats/testing/environments/local.yml b/vendor/github.com/elastic/beats/testing/environments/local.yml new file mode 100644 index 00000000..53881950 --- /dev/null +++ b/vendor/github.com/elastic/beats/testing/environments/local.yml @@ -0,0 +1,28 @@ +# Defines if ports should be exported. +# This is useful for testing locally with a full elastic stack setup. +# All services can be reached through localhost like localhost:5601 for Kibana +# This is not used for CI as otherwise ports conflicts could happen. +version: '2' +services: + kibana: + ports: + - "5601:5601" + environment: + - ELASTICSEARCH_URL=http://elasticsearch:9200/ + + elasticsearch: + ports: + - "9200:9200" + + logstash: + ports: + - "5044:5044" + - "5055:5055" + + # Makes sure containers keep running for manual testing + beat: + build: . 
+ depends_on: + - elasticsearch + - kibana + - logstash diff --git a/vendor/github.com/elastic/beats/testing/environments/snapshot.yml b/vendor/github.com/elastic/beats/testing/environments/snapshot.yml index 695c5acc..2b02c5ed 100644 --- a/vendor/github.com/elastic/beats/testing/environments/snapshot.yml +++ b/vendor/github.com/elastic/beats/testing/environments/snapshot.yml @@ -7,8 +7,8 @@ services: context: ./docker/elasticsearch dockerfile: Dockerfile-snapshot args: - ELASTIC_VERSION: 5.1.2 - ES_DOWNLOAD_URL: 'https://staging.elastic.co/5.1.2-429c1ec3/downloads/elasticsearch' + ELASTIC_VERSION: 5.2.1 + ES_DOWNLOAD_URL: https://staging.elastic.co/5.2.1-f614a4d5/downloads/elasticsearch #XPACK: http://snapshots.elastic.co/downloads/packs/x-pack/x-pack-6.0.0-alpha1-SNAPSHOT.zip environment: - "ES_JAVA_OPTS=-Xms512m -Xmx512m" @@ -19,12 +19,17 @@ services: logstash: build: context: ./docker/logstash - dockerfile: Dockerfile-snapshot + dockerfile: Dockerfile + args: + LS_VERSION: 5.2.1 + LS_DOWNLOAD_URL: https://staging.elastic.co/5.2.1-f614a4d5/downloads/logstash + environment: + - ES_HOST=elasticsearch kibana: build: context: ./docker/kibana dockerfile: Dockerfile-snapshot args: [ ] - #KIBANA_DOWNLOAD_URL: 'https://staging.elastic.co/5.1.0-1106bba6/downloads/kibana/kibana-5.1.0-linux-x86_64.tar.gz' + #KIBANA_DOWNLOAD_URL: https://snapshots.elastic.co/downloads/kibana/kibana-6.0.0-alpha1-SNAPSHOT-linux-x86_64.tar.gz #X_PACK_URL: http://snapshots.elastic.co/downloads/kibana-plugins/x-pack/x-pack-6.0.0-alpha1-SNAPSHOT.zip diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/CHANGELOG.md index 49ff9216..aad986fe 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/CHANGELOG.md +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/CHANGELOG.md @@ -1,5 +1,32 @@ # Changelog +#### Version 1.11.0 (2016-12-20) + +New Features: + - Metrics! Thanks to Sébastien Launay for all his work on this feature + ([#701](https://github.com/Shopify/sarama/pull/701), + [#746](https://github.com/Shopify/sarama/pull/746), + [#766](https://github.com/Shopify/sarama/pull/766)). + - Add support for LZ4 compression + ([#786](https://github.com/Shopify/sarama/pull/786)). + - Add support for ListOffsetRequest v1 and Kafka 0.10.1 + ([#775](https://github.com/Shopify/sarama/pull/775)). + - Added a `HighWaterMarks` method to the Consumer which aggregates the + `HighWaterMarkOffset` values of its child topic/partitions + ([#769](https://github.com/Shopify/sarama/pull/769)). + +Bug Fixes: + - Fixed producing when using timestamps, compression and Kafka 0.10 + ([#759](https://github.com/Shopify/sarama/pull/759)). + - Added missing decoder methods to DescribeGroups response + ([#756](https://github.com/Shopify/sarama/pull/756)). + - Fix producer shutdown when `Return.Errors` is disabled + ([#787](https://github.com/Shopify/sarama/pull/787)). + - Don't mutate configuration in SyncProducer + ([#790](https://github.com/Shopify/sarama/pull/790)). + - Fix crash on SASL initialization failure + ([#795](https://github.com/Shopify/sarama/pull/795)). 
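The sarama 1.11.0 entries above list a new `HighWaterMarks` method on the Consumer, which the consumer.go hunk later in this patch defines as returning `map[string]map[int32]int64`. A minimal sketch of calling it from client code might look like the following; the broker address, topic name, and partition are illustrative placeholders, not values taken from this patch.

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Placeholder broker address; assumes a reachable Kafka cluster.
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	// Placeholder topic/partition just to create one child consumer.
	pc, err := consumer.ConsumePartition("events", 0, sarama.OffsetOldest)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()

	// New in 1.11.0: aggregates the HighWaterMarkOffset of every child
	// topic/partition. As the interface comment notes, values for
	// different partitions are updated separately and may be out of sync.
	for topic, partitions := range consumer.HighWaterMarks() {
		for partition, offset := range partitions {
			fmt.Printf("%s/%d high water mark: %d\n", topic, partition, offset)
		}
	}
}
```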
+ #### Version 1.10.1 (2016-08-30) Bug Fixes: diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/README.md index bcbd3e9c..c2968b92 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/README.md +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/README.md @@ -18,7 +18,7 @@ Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apa Sarama provides a "2 releases + 2 months" compatibility guarantee: we support the two latest stable releases of Kafka and Go, and we provide a two month grace period for older releases. This means we currently officially support -Go 1.6 and 1.5, and Kafka 0.10.0, 0.9.0 and 0.8.2, although older releases are +Go 1.7 and 1.6, and Kafka 0.10.0 and 0.9.0, although older releases are still likely to work. Sarama follows semantic versioning and provides API stability via the gopkg.in service. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/Vagrantfile index 4586d9ae..f4b848a3 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/Vagrantfile +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/Vagrantfile @@ -4,6 +4,7 @@ # Vagrantfile API/syntax version. Don't touch unless you know what you're doing! VAGRANTFILE_API_VERSION = "2" +# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB MEMORY = 3072 Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/async_producer.go index e1ae5b0d..3af47fdd 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/async_producer.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/async_producer.go @@ -210,6 +210,8 @@ func (p *asyncProducer) Close() error { for event := range p.errors { errors = append(errors, event) } + } else { + <-p.errors } if len(errors) > 0 { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/broker.go index d0ebbbc0..89beecc8 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/broker.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/broker.go @@ -32,18 +32,21 @@ type Broker struct { incomingByteRate metrics.Meter requestRate metrics.Meter requestSize metrics.Histogram + requestLatency metrics.Histogram outgoingByteRate metrics.Meter responseRate metrics.Meter responseSize metrics.Histogram brokerIncomingByteRate metrics.Meter brokerRequestRate metrics.Meter brokerRequestSize metrics.Histogram + brokerRequestLatency metrics.Histogram brokerOutgoingByteRate metrics.Meter brokerResponseRate metrics.Meter brokerResponseSize metrics.Histogram } type responsePromise struct { + requestTime time.Time correlationID int32 packets chan []byte errors chan error @@ -103,6 +106,7 @@ func (b *Broker) Open(conf *Config) error { b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry) b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry) b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry) + b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", 
conf.MetricRegistry) b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry) b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry) b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry) @@ -112,13 +116,14 @@ func (b *Broker) Open(conf *Config) error { b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry) b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry) b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry) + b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry) b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry) b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry) b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry) } if conf.Net.SASL.Enable { - b.connErr = b.saslAuthenticate() + b.connErr = b.sendAndReceiveSASLPlainAuth() if b.connErr != nil { err = b.conn.Close() if err == nil { @@ -366,7 +371,7 @@ func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, } req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} - buf, err := encode(req) + buf, err := encode(req, b.conf.MetricRegistry) if err != nil { return nil, err } @@ -376,6 +381,7 @@ func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, return nil, err } + requestTime := time.Now() bytes, err := b.conn.Write(buf) b.updateOutgoingCommunicationMetrics(bytes) if err != nil { @@ -384,10 +390,12 @@ func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, b.correlationID++ if !promiseResponse { + // Record request latency without the response + b.updateRequestLatencyMetrics(time.Since(requestTime)) return nil, nil } - promise := responsePromise{req.correlationID, make(chan []byte), make(chan error)} + promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)} b.responses <- promise return &promise, nil @@ -476,8 +484,9 @@ func (b *Broker) responseReceiver() { } bytesReadHeader, err := io.ReadFull(b.conn, header) + requestLatency := time.Since(response.requestTime) if err != nil { - b.updateIncomingCommunicationMetrics(bytesReadHeader) + b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) dead = err response.errors <- err continue @@ -486,13 +495,13 @@ func (b *Broker) responseReceiver() { decodedHeader := responseHeader{} err = decode(header, &decodedHeader) if err != nil { - b.updateIncomingCommunicationMetrics(bytesReadHeader) + b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) dead = err response.errors <- err continue } if decodedHeader.correlationID != response.correlationID { - b.updateIncomingCommunicationMetrics(bytesReadHeader) + b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) // TODO if decoded ID < cur ID, discard until we catch up // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)} @@ -502,7 +511,7 @@ func (b *Broker) responseReceiver() { buf := make([]byte, decodedHeader.length-4) bytesReadBody, err := io.ReadFull(b.conn, buf) - b.updateIncomingCommunicationMetrics(bytesReadHeader + 
bytesReadBody) + b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency) if err != nil { dead = err response.errors <- err @@ -514,10 +523,10 @@ func (b *Broker) responseReceiver() { close(b.done) } -func (b *Broker) sendAndReceiveSASLHandshake(method string) error { - rb := &SaslHandshakeRequest{method} +func (b *Broker) sendAndReceiveSASLPlainHandshake() error { + rb := &SaslHandshakeRequest{"PLAIN"} req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} - buf, err := encode(req) + buf, err := encode(req, b.conf.MetricRegistry) if err != nil { return err } @@ -527,6 +536,7 @@ func (b *Broker) sendAndReceiveSASLHandshake(method string) error { return err } + requestTime := time.Now() bytes, err := b.conn.Write(buf) b.updateOutgoingCommunicationMetrics(bytes) if err != nil { @@ -536,16 +546,19 @@ func (b *Broker) sendAndReceiveSASLHandshake(method string) error { b.correlationID++ //wait for the response header := make([]byte, 8) // response header - n, err := io.ReadFull(b.conn, header) - b.updateIncomingCommunicationMetrics(n) + _, err = io.ReadFull(b.conn, header) + if err != nil { + Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error()) + return err + } length := binary.BigEndian.Uint32(header[:4]) payload := make([]byte, length-4) - n, err = io.ReadFull(b.conn, payload) + n, err := io.ReadFull(b.conn, payload) if err != nil { Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error()) return err } - b.updateIncomingCommunicationMetrics(n) + b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime)) res := &SaslHandshakeResponse{} err = versionedDecode(payload, res, 0) if err != nil { @@ -553,21 +566,11 @@ func (b *Broker) sendAndReceiveSASLHandshake(method string) error { return err } if res.Err != ErrNoError { - Logger.Printf("Invalid SASL Mechanism : %s\n", err.Error()) + Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error()) return res.Err } - Logger.Print("Successul SASL handshake") + Logger.Print("Successful SASL handshake") return nil - -} - -func (b *Broker) saslAuthenticate() error { - err := b.sendAndReceiveSASLHandshake("PLAIN") - if err != nil { - Logger.Printf("Error performing SASL handshake %s: %s\n", b.addr, err) - return err - } - return b.sendAndReceiveSASLPlainAuth() } // Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149) @@ -589,6 +592,13 @@ func (b *Broker) saslAuthenticate() error { // When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way // of responding to bad credentials but thats how its being done today. 
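The comment above describes the broker-side SASL/PLAIN exchange that `sendAndReceiveSASLPlainAuth` implements, and this patch also adds a `Net.SASL.Handshake` option (defaulting to true in `NewConfig`, per the config.go hunk below). A hedged client-side sketch of the settings this code path consumes; the broker address and credentials are placeholders:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func newSASLClient() (sarama.Client, error) {
	cfg := sarama.NewConfig()
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.User = "alice"      // placeholder credentials
	cfg.Net.SASL.Password = "secret" // placeholder credentials
	// Handshake defaults to true; per the new config comment it should only
	// be set to false when talking to a non-Kafka SASL proxy.
	cfg.Net.SASL.Handshake = true

	// Placeholder broker address.
	return sarama.NewClient([]string{"broker:9092"}, cfg)
}

func main() {
	client, err := newSASLClient()
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```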
func (b *Broker) sendAndReceiveSASLPlainAuth() error { + if b.conf.Net.SASL.Handshake { + handshakeErr := b.sendAndReceiveSASLPlainHandshake() + if handshakeErr != nil { + Logger.Printf("Error while performing SASL handshake %s\n", b.addr) + return handshakeErr + } + } length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password) authBytes := make([]byte, length+4) //4 byte length header + auth data binary.BigEndian.PutUint32(authBytes, uint32(length)) @@ -600,6 +610,7 @@ func (b *Broker) sendAndReceiveSASLPlainAuth() error { return err } + requestTime := time.Now() bytesWritten, err := b.conn.Write(authBytes) b.updateOutgoingCommunicationMetrics(bytesWritten) if err != nil { @@ -609,7 +620,7 @@ func (b *Broker) sendAndReceiveSASLPlainAuth() error { header := make([]byte, 4) n, err := io.ReadFull(b.conn, header) - b.updateIncomingCommunicationMetrics(n) + b.updateIncomingCommunicationMetrics(n, time.Since(requestTime)) // If the credentials are valid, we would get a 4 byte response filled with null characters. // Otherwise, the broker closes the connection and we get an EOF if err != nil { @@ -621,7 +632,8 @@ func (b *Broker) sendAndReceiveSASLPlainAuth() error { return nil } -func (b *Broker) updateIncomingCommunicationMetrics(bytes int) { +func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) { + b.updateRequestLatencyMetrics(requestLatency) b.responseRate.Mark(1) if b.brokerResponseRate != nil { b.brokerResponseRate.Mark(1) @@ -637,6 +649,14 @@ func (b *Broker) updateIncomingCommunicationMetrics(bytes int) { } } +func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) { + requestLatencyInMs := int64(requestLatency / time.Millisecond) + b.requestLatency.Update(requestLatencyInMs) + if b.brokerRequestLatency != nil { + b.brokerRequestLatency.Update(requestLatencyInMs) + } +} + func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { b.requestRate.Mark(1) if b.brokerRequestRate != nil { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/client.go index e9a9ea77..f869a143 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/client.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/client.go @@ -521,6 +521,9 @@ func (client *client) getOffset(topic string, partitionID int32, time int64) (in } request := &OffsetRequest{} + if client.conf.Version.IsAtLeast(V0_10_1_0) { + request.Version = 1 + } request.AddBlock(topic, partitionID, time, 1) response, err := broker.GetAvailableOffsets(request) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/config.go index 9cab8349..a417a38b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/config.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/config.go @@ -43,6 +43,10 @@ type Config struct { // Whether or not to use SASL authentication when connecting to the broker // (defaults to false). Enable bool + // Whether or not to send the Kafka SASL handshake first if enabled + // (defaults to true). You should only set this to false if you're using + // a non-Kafka SASL proxy. 
+ Handshake bool //username and password for SASL/PLAIN authentication User string Password string @@ -251,6 +255,7 @@ func NewConfig() *Config { c.Net.DialTimeout = 30 * time.Second c.Net.ReadTimeout = 30 * time.Second c.Net.WriteTimeout = 30 * time.Second + c.Net.SASL.Handshake = true c.Metadata.Retry.Max = 3 c.Metadata.Retry.Backoff = 250 * time.Millisecond @@ -375,6 +380,10 @@ func (c *Config) Validate() error { return ConfigurationError("Producer.Retry.Backoff must be >= 0") } + if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) { + return ConfigurationError("lz4 compression requires Version >= V0_10_0_0") + } + // validate the Consumer values switch { case c.Consumer.Fetch.Min <= 0: diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/consumer.go index 5271e21d..ddac564f 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/consumer.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/consumer.go @@ -63,6 +63,10 @@ type Consumer interface { // or OffsetOldest ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) + // HighWaterMarks returns the current high water marks for each topic and partition. + // Consistency between partitions is not guaranteed since high water marks are updated separately. + HighWaterMarks() map[string]map[int32]int64 + // Close shuts down the consumer. It must be called after all child // PartitionConsumers have already been closed. Close() error @@ -163,6 +167,22 @@ func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) return child, nil } +func (c *consumer) HighWaterMarks() map[string]map[int32]int64 { + c.lock.Lock() + defer c.lock.Unlock() + + hwms := make(map[string]map[int32]int64) + for topic, p := range c.children { + hwm := make(map[int32]int64, len(p)) + for partition, pc := range p { + hwm[partition] = pc.HighWaterMarkOffset() + } + hwms[topic] = hwm + } + + return hwms +} + func (c *consumer) addChild(child *partitionConsumer) error { c.lock.Lock() defer c.lock.Unlock() diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/describe_groups_response.go index e78b8ce0..d2c2071e 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/describe_groups_response.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/describe_groups_response.go @@ -172,3 +172,15 @@ func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) { return nil } + +func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { + assignment := new(ConsumerGroupMemberAssignment) + err := decode(gmd.MemberAssignment, assignment) + return assignment, err +} + +func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) { + metadata := new(ConsumerGroupMemberMetadata) + err := decode(gmd.MemberMetadata, metadata) + return metadata, err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/encoder_decoder.go index 35a24c2d..7ce3bc0f 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/encoder_decoder.go +++ 
b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/encoder_decoder.go @@ -1,6 +1,10 @@ package sarama -import "fmt" +import ( + "fmt" + + "github.com/rcrowley/go-metrics" +) // Encoder is the interface that wraps the basic Encode method. // Anything implementing Encoder can be turned into bytes using Kafka's encoding rules. @@ -8,8 +12,8 @@ type encoder interface { encode(pe packetEncoder) error } -// Encode takes an Encoder and turns it into bytes. -func encode(e encoder) ([]byte, error) { +// Encode takes an Encoder and turns it into bytes while potentially recording metrics. +func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) { if e == nil { return nil, nil } @@ -27,6 +31,7 @@ func encode(e encoder) ([]byte, error) { } realEnc.raw = make([]byte, prepEnc.length) + realEnc.registry = metricRegistry err = e.encode(&realEnc) if err != nil { return nil, err diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/errors.go index aa8f213d..cc3f623d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/errors.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/errors.go @@ -108,6 +108,7 @@ const ( ErrUnsupportedSASLMechanism KError = 33 ErrIllegalSASLState KError = 34 ErrUnsupportedVersion KError = 35 + ErrUnsupportedForMessageFormat KError = 43 ) func (err KError) Error() string { @@ -188,6 +189,8 @@ func (err KError) Error() string { return "kafka server: Request is not valid given the current SASL state." case ErrUnsupportedVersion: return "kafka server: The version of API is not supported." + case ErrUnsupportedForMessageFormat: + return "kafka server: The requested operation is not supported by the message format version." } return fmt.Sprintf("Unknown error, how did this happen? 
Error code = %d", err) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go index 03e47b6b..b6d83c5d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go @@ -196,6 +196,7 @@ func newDataCollector(brokerList []string) sarama.SyncProducer { config := sarama.NewConfig() config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message config.Producer.Retry.Max = 10 // Retry up to 10 times to produce the message + config.Producer.Return.Successes = true tlsConfig := createTlsConfiguration() if tlsConfig != nil { config.Net.TLS.Config = tlsConfig diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/join_group_request.go index d95085b2..656db456 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/join_group_request.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/join_group_request.go @@ -98,7 +98,7 @@ func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) { } func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error { - bin, err := encode(metadata) + bin, err := encode(metadata, nil) if err != nil { return err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/message.go index 0f0ca5b6..327c5fa2 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/message.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/message.go @@ -8,6 +8,7 @@ import ( "time" "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4" ) // CompressionCodec represents the various compression codecs recognized by Kafka in messages. 
@@ -20,6 +21,7 @@ const ( CompressionNone CompressionCodec = 0 CompressionGZIP CompressionCodec = 1 CompressionSnappy CompressionCodec = 2 + CompressionLZ4 CompressionCodec = 3 ) type Message struct { @@ -31,6 +33,7 @@ type Message struct { Timestamp time.Time // the timestamp of the message (version 1+ only) compressedCache []byte + compressedSize int // used for computing the compression ratio metrics } func (m *Message) encode(pe packetEncoder) error { @@ -74,9 +77,23 @@ func (m *Message) encode(pe packetEncoder) error { tmp := snappy.Encode(m.Value) m.compressedCache = tmp payload = m.compressedCache + case CompressionLZ4: + var buf bytes.Buffer + writer := lz4.NewWriter(&buf) + if _, err = writer.Write(m.Value); err != nil { + return err + } + if err = writer.Close(); err != nil { + return err + } + m.compressedCache = buf.Bytes() + payload = m.compressedCache + default: return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)} } + // Keep in mind the compressed payload size for metric gathering + m.compressedSize = len(payload) } if err = pe.putBytes(payload); err != nil { @@ -121,6 +138,10 @@ func (m *Message) decode(pd packetDecoder) (err error) { return err } + // Required for deep equal assertion during tests but might be useful + // for future metrics about the compression ratio in fetch requests + m.compressedSize = len(m.Value) + switch m.Codec { case CompressionNone: // nothing to do @@ -148,6 +169,18 @@ func (m *Message) decode(pd packetDecoder) (err error) { if err := m.decodeSet(); err != nil { return err } + case CompressionLZ4: + if m.Value == nil { + break + } + reader := lz4.NewReader(bytes.NewReader(m.Value)) + if m.Value, err = ioutil.ReadAll(reader); err != nil { + return err + } + if err := m.decodeSet(); err != nil { + return err + } + default: return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)} } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/metrics.go index 2b08d398..4869708e 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/metrics.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/metrics.go @@ -2,6 +2,7 @@ package sarama import ( "fmt" + "strings" "github.com/rcrowley/go-metrics" ) @@ -34,3 +35,17 @@ func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) m func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram { return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r) } + +func getMetricNameForTopic(name string, topic string) string { + // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy + // cf. 
KAFKA-1902 and KAFKA-2337 + return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1)) +} + +func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter { + return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r) +} + +func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram { + return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/mockbroker.go index e8bd088c..0734d34f 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/mockbroker.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/mockbroker.go @@ -215,7 +215,7 @@ func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) } Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res) - encodedRes, err := encode(res) + encodedRes, err := encode(res, nil) if err != nil { b.serverError(err) break diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/mocks/consumer.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/mocks/consumer.go index 09657c0e..a524190a 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/mocks/consumer.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/mocks/consumer.go @@ -96,6 +96,22 @@ func (c *Consumer) Partitions(topic string) ([]int32, error) { return c.metadata[topic], nil } +func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 { + c.l.Lock() + defer c.l.Unlock() + + hwms := make(map[string]map[int32]int64, len(c.partitionConsumers)) + for topic, partitionConsumers := range c.partitionConsumers { + hwm := make(map[int32]int64, len(partitionConsumers)) + for partition, pc := range partitionConsumers { + hwm[partition] = pc.HighWaterMarkOffset() + } + hwms[topic] = hwm + } + + return hwms +} + // Close implements the Close method from the sarama.Consumer interface. It will close // all registered PartitionConsumer instances. 
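Both the real consumer and this mock now implement the new HighWaterMarks() accessor. A short usage sketch follows; the broker address "localhost:9092" and topic "events" are assumptions for illustration, not values taken from the patch:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	pc, err := consumer.ConsumePartition("events", 0, sarama.OffsetOldest)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()

	// One map per topic, one entry per partition this consumer is reading;
	// the values come from each child's HighWaterMarkOffset().
	for topic, partitions := range consumer.HighWaterMarks() {
		for partition, hwm := range partitions {
			log.Printf("%s/%d high water mark: %d", topic, partition, hwm)
		}
	}
}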
func (c *Consumer) Close() error { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/offset_request.go index 2f74df3d..a14f7182 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/offset_request.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/offset_request.go @@ -2,30 +2,35 @@ package sarama type offsetRequestBlock struct { time int64 - maxOffsets int32 + maxOffsets int32 // Only used in version 0 } -func (b *offsetRequestBlock) encode(pe packetEncoder) error { +func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error { pe.putInt64(int64(b.time)) - pe.putInt32(b.maxOffsets) + if version == 0 { + pe.putInt32(b.maxOffsets) + } + return nil } -func (b *offsetRequestBlock) decode(pd packetDecoder) (err error) { +func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) { if b.time, err = pd.getInt64(); err != nil { return err } - if b.maxOffsets, err = pd.getInt32(); err != nil { - return err + if version == 0 { + if b.maxOffsets, err = pd.getInt32(); err != nil { + return err + } } return nil } type OffsetRequest struct { - replicaID *int32 - blocks map[string]map[int32]*offsetRequestBlock - + Version int16 + replicaID *int32 storeReplicaID int32 + blocks map[string]map[int32]*offsetRequestBlock } func (r *OffsetRequest) encode(pe packetEncoder) error { @@ -51,7 +56,7 @@ func (r *OffsetRequest) encode(pe packetEncoder) error { } for partition, block := range partitions { pe.putInt32(partition) - if err = block.encode(pe); err != nil { + if err = block.encode(pe, r.Version); err != nil { return err } } @@ -60,6 +65,8 @@ func (r *OffsetRequest) encode(pe packetEncoder) error { } func (r *OffsetRequest) decode(pd packetDecoder, version int16) error { + r.Version = version + // Ignore replica ID if _, err := pd.getInt32(); err != nil { return err @@ -88,7 +95,7 @@ func (r *OffsetRequest) decode(pd packetDecoder, version int16) error { return err } block := &offsetRequestBlock{} - if err := block.decode(pd); err != nil { + if err := block.decode(pd, version); err != nil { return err } r.blocks[topic][partition] = block @@ -102,11 +109,16 @@ func (r *OffsetRequest) key() int16 { } func (r *OffsetRequest) version() int16 { - return 0 + return r.Version } func (r *OffsetRequest) requiredVersion() KafkaVersion { - return minVersion + switch r.Version { + case 1: + return V0_10_1_0 + default: + return minVersion + } } func (r *OffsetRequest) SetReplicaID(id int32) { @@ -125,7 +137,9 @@ func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, ma tmp := new(offsetRequestBlock) tmp.time = time - tmp.maxOffsets = maxOffsets + if r.Version == 0 { + tmp.maxOffsets = maxOffsets + } r.blocks[topic][partitionID] = tmp } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/offset_response.go index ad1a6697..9a9cfe96 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/offset_response.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/offset_response.go @@ -1,30 +1,57 @@ package sarama type OffsetResponseBlock struct { - Err KError - Offsets []int64 + Err KError + Offsets []int64 // Version 0 + Offset int64 // Version 1 + Timestamp int64 // Version 1 } -func (b *OffsetResponseBlock) decode(pd packetDecoder) (err error) { +func (b 
*OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) { tmp, err := pd.getInt16() if err != nil { return err } b.Err = KError(tmp) - b.Offsets, err = pd.getInt64Array() + if version == 0 { + b.Offsets, err = pd.getInt64Array() - return err + return err + } + + b.Timestamp, err = pd.getInt64() + if err != nil { + return err + } + + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + // For backwards compatibility put the offset in the offsets array too + b.Offsets = []int64{b.Offset} + + return nil } -func (b *OffsetResponseBlock) encode(pe packetEncoder) (err error) { +func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) { pe.putInt16(int16(b.Err)) - return pe.putInt64Array(b.Offsets) + if version == 0 { + return pe.putInt64Array(b.Offsets) + } + + pe.putInt64(b.Timestamp) + pe.putInt64(b.Offset) + + return nil } type OffsetResponse struct { - Blocks map[string]map[int32]*OffsetResponseBlock + Version int16 + Blocks map[string]map[int32]*OffsetResponseBlock } func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) { @@ -54,7 +81,7 @@ func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) { } block := new(OffsetResponseBlock) - err = block.decode(pd) + err = block.decode(pd, version) if err != nil { return err } @@ -106,7 +133,7 @@ func (r *OffsetResponse) encode(pe packetEncoder) (err error) { } for partition, block := range partitions { pe.putInt32(partition) - if err = block.encode(pe); err != nil { + if err = block.encode(pe, r.version()); err != nil { return err } } @@ -120,11 +147,16 @@ func (r *OffsetResponse) key() int16 { } func (r *OffsetResponse) version() int16 { - return 0 + return r.Version } func (r *OffsetResponse) requiredVersion() KafkaVersion { - return minVersion + switch r.Version { + case 1: + return V0_10_1_0 + default: + return minVersion + } } // testing API @@ -138,5 +170,5 @@ func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset byTopic = make(map[int32]*OffsetResponseBlock) r.Blocks[topic] = byTopic } - byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}} + byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset} } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/packet_encoder.go index 0df6e24a..27a10f6d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/packet_encoder.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/packet_encoder.go @@ -1,5 +1,7 @@ package sarama +import "github.com/rcrowley/go-metrics" + // PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules. // Types implementing Encoder only need to worry about calling methods like PutString, // not about how a string is represented in Kafka. 
@@ -19,9 +21,15 @@ type packetEncoder interface { putInt32Array(in []int32) error putInt64Array(in []int64) error + // Provide the current offset to record the batch size metric + offset() int + // Stacks, see PushEncoder push(in pushEncoder) pop() error + + // To record metrics when provided + metricRegistry() metrics.Registry } // PushEncoder is the interface for encoding fields like CRCs and lengths where the value diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/prep_encoder.go index 8c6ba850..fd5ea0f9 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/prep_encoder.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/prep_encoder.go @@ -3,6 +3,8 @@ package sarama import ( "fmt" "math" + + "github.com/rcrowley/go-metrics" ) type prepEncoder struct { @@ -99,6 +101,10 @@ func (pe *prepEncoder) putInt64Array(in []int64) error { return nil } +func (pe *prepEncoder) offset() int { + return pe.length +} + // stackable func (pe *prepEncoder) push(in pushEncoder) { @@ -108,3 +114,8 @@ func (pe *prepEncoder) push(in pushEncoder) { func (pe *prepEncoder) pop() error { return nil } + +// we do not record metrics during the prep encoder pass +func (pe *prepEncoder) metricRegistry() metrics.Registry { + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/produce_request.go index f8a25094..40dc8015 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/produce_request.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/produce_request.go @@ -1,5 +1,7 @@ package sarama +import "github.com/rcrowley/go-metrics" + // RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements // it must see before responding. Any of the constants defined here are valid. On broker versions // prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many @@ -12,7 +14,9 @@ const ( NoResponse RequiredAcks = 0 // WaitForLocal waits for only the local commit to succeed before responding. WaitForLocal RequiredAcks = 1 - // WaitForAll waits for all replicas to commit before responding. + // WaitForAll waits for all in-sync replicas to commit before responding. + // The minimum number of in-sync replicas is configured on the broker via + // the `min.insync.replicas` configuration key. 
WaitForAll RequiredAcks = -1 ) @@ -30,6 +34,15 @@ func (r *ProduceRequest) encode(pe packetEncoder) error { if err != nil { return err } + metricRegistry := pe.metricRegistry() + var batchSizeMetric metrics.Histogram + var compressionRatioMetric metrics.Histogram + if metricRegistry != nil { + batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry) + compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry) + } + + totalRecordCount := int64(0) for topic, partitions := range r.msgSets { err = pe.putString(topic) if err != nil { @@ -39,7 +52,13 @@ func (r *ProduceRequest) encode(pe packetEncoder) error { if err != nil { return err } + topicRecordCount := int64(0) + var topicCompressionRatioMetric metrics.Histogram + if metricRegistry != nil { + topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry) + } for id, msgSet := range partitions { + startOffset := pe.offset() pe.putInt32(id) pe.push(&lengthField{}) err = msgSet.encode(pe) @@ -50,8 +69,41 @@ func (r *ProduceRequest) encode(pe packetEncoder) error { if err != nil { return err } + if metricRegistry != nil { + for _, messageBlock := range msgSet.Messages { + // Is this a fake "message" wrapping real messages? + if messageBlock.Msg.Set != nil { + topicRecordCount += int64(len(messageBlock.Msg.Set.Messages)) + } else { + // A single uncompressed message + topicRecordCount++ + } + // Better be safe than sorry when computing the compression ratio + if messageBlock.Msg.compressedSize != 0 { + compressionRatio := float64(len(messageBlock.Msg.Value)) / + float64(messageBlock.Msg.compressedSize) + // Histogram do not support decimal values, let's multiple it by 100 for better precision + intCompressionRatio := int64(100 * compressionRatio) + compressionRatioMetric.Update(intCompressionRatio) + topicCompressionRatioMetric.Update(intCompressionRatio) + } + } + batchSize := int64(pe.offset() - startOffset) + batchSizeMetric.Update(batchSize) + getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize) + } + } + if topicRecordCount > 0 { + getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount) + getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount) + totalRecordCount += topicRecordCount } } + if totalRecordCount > 0 { + metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount) + getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount) + } + return nil } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/produce_set.go index 74025b0e..158d9c47 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/produce_set.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/produce_set.go @@ -89,7 +89,7 @@ func (ps *produceSet) buildRequest() *ProduceRequest { // and sent as the payload of a single fake "message" with the appropriate codec // set and no key. When the server sees a message with a compression codec, it // decompresses the payload and treats the result as its message set. - payload, err := encode(set.setToSend) + payload, err := encode(set.setToSend, ps.parent.conf.MetricRegistry) if err != nil { Logger.Println(err) // if this happens, it's basically our fault. 
panic(err) @@ -98,6 +98,7 @@ func (ps *produceSet) buildRequest() *ProduceRequest { Codec: ps.parent.conf.Producer.Compression, Key: nil, Value: payload, + Set: set.setToSend, // Provide the underlying message set for accurate metrics } if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { compMsg.Version = 1 diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/real_encoder.go index 076fdd0c..ced4267c 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/real_encoder.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/real_encoder.go @@ -1,11 +1,16 @@ package sarama -import "encoding/binary" +import ( + "encoding/binary" + + "github.com/rcrowley/go-metrics" +) type realEncoder struct { - raw []byte - off int - stack []pushEncoder + raw []byte + off int + stack []pushEncoder + registry metrics.Registry } // primitives @@ -98,6 +103,10 @@ func (re *realEncoder) putInt64Array(in []int64) error { return nil } +func (re *realEncoder) offset() int { + return re.off +} + // stacks func (re *realEncoder) push(in pushEncoder) { @@ -113,3 +122,8 @@ func (re *realEncoder) pop() error { return in.run(re.off, re.raw) } + +// we do record metrics during the real encoder pass +func (re *realEncoder) metricRegistry() metrics.Registry { + return re.registry +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/request.go index 3cca8bd2..73310ca8 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/request.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/request.go @@ -89,7 +89,7 @@ func allocateBody(key, version int16) protocolBody { case 1: return &FetchRequest{} case 2: - return &OffsetRequest{} + return &OffsetRequest{Version: version} case 3: return &MetadataRequest{} case 8: diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/sarama.go index a33b1bdb..7d5dc60d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/sarama.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/sarama.go @@ -25,25 +25,42 @@ Metrics are exposed through https://github.com/rcrowley/go-metrics library in a Broker related metrics: - +------------------------------------------------+------------+---------------------------------------------------------------+ - | Name | Type | Description | - +------------------------------------------------+------------+---------------------------------------------------------------+ - | incoming-byte-rate | meter | Bytes/second read off all brokers | - | incoming-byte-rate-for-broker- | meter | Bytes/second read off a given broker | - | outgoing-byte-rate | meter | Bytes/second written off all brokers | - | outgoing-byte-rate-for-broker- | meter | Bytes/second written off a given broker | - | request-rate | meter | Requests/second sent to all brokers | - | request-rate-for-broker- | meter | Requests/second sent to a given broker | - | histogram request-size | histogram | Distribution of the request size in bytes for all brokers | - | histogram request-size-for-broker- | histogram | Distribution of the request size in bytes for a given broker | - | response-rate | meter | Responses/second received from all brokers | - | response-rate-for-broker- | 
meter | Responses/second received from a given broker | - | histogram response-size | histogram | Distribution of the response size in bytes for all brokers | - | histogram response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker | - +------------------------------------------------+------------+---------------------------------------------------------------+ + +----------------------------------------------+------------+---------------------------------------------------------------+ + | Name | Type | Description | + +----------------------------------------------+------------+---------------------------------------------------------------+ + | incoming-byte-rate | meter | Bytes/second read off all brokers | + | incoming-byte-rate-for-broker- | meter | Bytes/second read off a given broker | + | outgoing-byte-rate | meter | Bytes/second written off all brokers | + | outgoing-byte-rate-for-broker- | meter | Bytes/second written off a given broker | + | request-rate | meter | Requests/second sent to all brokers | + | request-rate-for-broker- | meter | Requests/second sent to a given broker | + | request-size | histogram | Distribution of the request size in bytes for all brokers | + | request-size-for-broker- | histogram | Distribution of the request size in bytes for a given broker | + | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers | + | request-latency-in-ms-for-broker- | histogram | Distribution of the request latency in ms for a given broker | + | response-rate | meter | Responses/second received from all brokers | + | response-rate-for-broker- | meter | Responses/second received from a given broker | + | response-size | histogram | Distribution of the response size in bytes for all brokers | + | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker | + +----------------------------------------------+------------+---------------------------------------------------------------+ Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics. 
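All of the metrics above are kept in the go-metrics registry attached to the client configuration (Config.MetricRegistry, which the encoders and produce requests above record into). A sketch, not part of the patch, of dumping whatever has been recorded; only the meter and histogram kinds listed in the tables are handled:

package main

import (
	"log"

	"github.com/Shopify/sarama"
	"github.com/rcrowley/go-metrics"
)

func main() {
	cfg := sarama.NewConfig()

	// ... run a producer or consumer with cfg, then inspect its registry.
	cfg.MetricRegistry.Each(func(name string, m interface{}) {
		switch metric := m.(type) {
		case metrics.Meter:
			log.Printf("%-45s %8.2f/s (count=%d)", name, metric.Rate1(), metric.Count())
		case metrics.Histogram:
			log.Printf("%-45s mean=%.2f (count=%d)", name, metric.Mean(), metric.Count())
		}
	})
}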
+Producer related metrics: + + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + | Name | Type | Description | + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + | batch-size | histogram | Distribution of the number of bytes sent per partition per request for all topics | + | batch-size-for-topic- | histogram | Distribution of the number of bytes sent per partition per request for a given topic | + | record-send-rate | meter | Records/second sent to all topics | + | record-send-rate-for-topic- | meter | Records/second sent to a given topic | + | records-per-request | histogram | Distribution of the number of records sent per request for all topics | + | records-per-request-for-topic- | histogram | Distribution of the number of records sent per request for a given topic | + | compression-ratio | histogram | Distribution of the compression ratio times 100 of record batches for all topics | + | compression-ratio-for-topic- | histogram | Distribution of the compression ratio times 100 of record batches for a given topic | + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + */ package sarama diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/sync_group_request.go index 7fbe47b2..fe207080 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/sync_group_request.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/sync_group_request.go @@ -90,7 +90,7 @@ func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment } func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error { - bin, err := encode(memberAssignment) + bin, err := encode(memberAssignment, nil) if err != nil { return err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/sync_producer.go index b181527f..c77ae314 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/sync_producer.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/sync_producer.go @@ -9,6 +9,9 @@ import "sync" // The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual // durability guarantee provided when a message is acknowledged depend on the configured value of `Producer.RequiredAcks`. // There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost. +// +// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to +// be set to true in its configuration. type SyncProducer interface { // SendMessage produces a given message, and returns only when it either has @@ -36,6 +39,15 @@ type syncProducer struct { // NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration. 
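With this change the SyncProducer no longer flips the Return flags on silently: a configuration without both Producer.Return.Successes and Producer.Return.Errors is rejected when the producer is constructed (see verifyProducerConfig below). A minimal sketch of the now-required setup, assuming a broker at "localhost:9092" and a topic named "events":

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	// Both must be true for a SyncProducer; NewSyncProducer returns a
	// ConfigurationError otherwise.
	cfg.Producer.Return.Successes = true
	cfg.Producer.Return.Errors = true

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "events",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored at partition=%d offset=%d", partition, offset)
}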
func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) { + if config == nil { + config = NewConfig() + config.Producer.Return.Successes = true + } + + if err := verifyProducerConfig(config); err != nil { + return nil, err + } + p, err := NewAsyncProducer(addrs, config) if err != nil { return nil, err @@ -46,6 +58,10 @@ func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) { // NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still // necessary to call Close() on the underlying client when shutting down this producer. func NewSyncProducerFromClient(client Client) (SyncProducer, error) { + if err := verifyProducerConfig(client.Config()); err != nil { + return nil, err + } + p, err := NewAsyncProducerFromClient(client) if err != nil { return nil, err @@ -54,8 +70,6 @@ func NewSyncProducerFromClient(client Client) (SyncProducer, error) { } func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer { - p.conf.Producer.Return.Successes = true - p.conf.Producer.Return.Errors = true sp := &syncProducer{producer: p} sp.wg.Add(2) @@ -65,6 +79,16 @@ func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer { return sp } +func verifyProducerConfig(config *Config) error { + if !config.Producer.Return.Errors { + return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer") + } + if !config.Producer.Return.Successes { + return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer") + } + return nil +} + func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) { oldMetadata := msg.Metadata defer func() { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go index e0ef7daa..83054ed7 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go @@ -43,6 +43,7 @@ func main() { config := sarama.NewConfig() config.Producer.RequiredAcks = sarama.WaitForAll + config.Producer.Return.Successes = true switch *partitioner { case "": diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/utils.go index c2da3866..3cbab2d9 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/utils.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/utils.go @@ -147,5 +147,6 @@ var ( V0_9_0_1 = newKafkaVersion(0, 9, 0, 1) V0_10_0_0 = newKafkaVersion(0, 10, 0, 0) V0_10_0_1 = newKafkaVersion(0, 10, 0, 1) + V0_10_1_0 = newKafkaVersion(0, 10, 1, 0) minVersion = V0_8_2_0 ) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/vagrant/kafka.conf b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/vagrant/kafka.conf index d975de43..25101df5 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/vagrant/kafka.conf +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/vagrant/kafka.conf @@ -1,5 +1,9 @@ start on started zookeeper-ZK_PORT stop on stopping zookeeper-ZK_PORT -pre-start exec sleep 2 -exec 
/opt/kafka-KAFKAID/bin/kafka-server-start.sh /opt/kafka-KAFKAID/config/server.properties +# Use a script instead of exec (using env stanza leaks KAFKA_HEAP_OPTS from zookeeper) +script + sleep 2 + export KAFKA_HEAP_OPTS="-Xmx320m" + exec /opt/kafka-KAFKAID/bin/kafka-server-start.sh /opt/kafka-KAFKAID/config/server.properties +end script diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/vagrant/zookeeper.conf b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/vagrant/zookeeper.conf index 421ae053..5a63e281 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/vagrant/zookeeper.conf +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/vagrant/zookeeper.conf @@ -1,4 +1,7 @@ start on started toxiproxy stop on stopping toxiproxy -exec /opt/kafka-KAFKAID/bin/zookeeper-server-start.sh /opt/kafka-KAFKAID/config/zookeeper.properties +script + export KAFKA_HEAP_OPTS="-Xmx192m" + exec /opt/kafka-KAFKAID/bin/zookeeper-server-start.sh /opt/kafka-KAFKAID/config/zookeeper.properties +end script diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/CHANGELOG.md b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/CHANGELOG.md index 1d5c8942..de0633e2 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/CHANGELOG.md +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/CHANGELOG.md @@ -14,6 +14,56 @@ This project adheres to [Semantic Versioning](http://semver.org/). ### Fixed +## [0.4.5] + +### Changed +- merging sub-configs enforces strict variable expansion #85 + +### Fixed +- fix merging nil sub-configs #85 + +## [0.4.4] + +### Added +- Add support for pure array config files #82 + +### Changed +- Invalid top-level types return non-critical error (no stack-trace) on merge #82 + +### Fixed +- Fix panic when merging or creating a config from nil interface value #82 + +## [0.4.3] + +### Changed +- Add per element type stop set for handling unquoted strings (reduces need for quoting strings in environment variables) #80 + +### Fixed +- fix issue unpacking array from environment variable into struct array fields #80 +- fix unparsed values being used for unpacking #80 + +## [0.4.2] + +### Fixed +- Treat `,` character as only special character in non quoted top-level strings. #78 + +## [0.4.1] + +### Fixed +- Fix parsing empty string or nil objects from environment variables. #76 + +## [0.4.0] + +### Added +- Syntax for passing lists and dictionaries to flags. #72 +- Add Unpacker interface specializations for primitive types. #73 +- Variable expansion parsing lists and dictionaries with parser introduced in + #72. #74 + +### Fixed +- Fix Unpacker interface not applied if some 'old' value is already present on + target and is struct implementing Unpack. #73 + ## [0.3.7] ### Fixed @@ -118,7 +168,13 @@ This project adheres to [Semantic Versioning](http://semver.org/). - Introduced CHANGELOG.md for documenting changes to ucfg. 
-[Unreleased]: https://github.com/elastic/go-ucfg/compare/v0.3.7...HEAD +[Unreleased]: https://github.com/elastic/go-ucfg/compare/v0.4.5...HEAD +[0.4.5]: https://github.com/elastic/go-ucfg/compare/v0.4.4...v0.4.5 +[0.4.4]: https://github.com/elastic/go-ucfg/compare/v0.4.3...v0.4.4 +[0.4.3]: https://github.com/elastic/go-ucfg/compare/v0.4.2...v0.4.3 +[0.4.2]: https://github.com/elastic/go-ucfg/compare/v0.4.1...v0.4.2 +[0.4.1]: https://github.com/elastic/go-ucfg/compare/v0.4.0...v0.4.1 +[0.4.0]: https://github.com/elastic/go-ucfg/compare/v0.3.7...v0.4.0 [0.3.7]: https://github.com/elastic/go-ucfg/compare/v0.3.6...v0.3.7 [0.3.6]: https://github.com/elastic/go-ucfg/compare/v0.3.5...v0.3.6 [0.3.5]: https://github.com/elastic/go-ucfg/compare/v0.3.4...v0.3.5 diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/doc.go new file mode 100644 index 00000000..73c6d1ee --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/doc.go @@ -0,0 +1,10 @@ +// Package ucfg provides a common representation for hierarchical configurations. +// +// The common representation provided by the Config type can be used with different +// configuration file formats like XML, JSON, HSJSON, YAML, or TOML. +// +// Config provides a low level and a high level interface for reading settings +// with additional features like custom unpackers, validation and capturing +// sub-configurations for deferred interpretation, lazy intra-configuration +// variable expansion, and OS environment variable expansion. +package ucfg diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/error.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/error.go index de8ef410..5c8e8114 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/error.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/error.go @@ -7,13 +7,18 @@ import ( "runtime/debug" ) +// Error type returned by all public functions in go-ucfg. type Error interface { error - Reason() error // error class, one of ErrConfig, ErrImplementation, ErrUnknown Class() error + // The internal reason error code like ErrMissing, ErrRequired, + // ErrTypeMismatch and others. + Reason() error + + // The error message. 
Message() string // [optional] path of config element error occurred for @@ -27,6 +32,7 @@ type baseError struct { reason error class error message string + path string } type criticalError struct { @@ -34,13 +40,7 @@ type criticalError struct { trace string } -type pathError struct { - baseError - meta *Meta - path string -} - -// error Reasons +// Error Reasons var ( ErrMissing = errors.New("missing field") @@ -79,28 +79,30 @@ var ( ErrEmpty = errors.New("empty field") ) -// error classes +// Error Classes var ( ErrConfig = errors.New("Configuration error") ErrImplementation = errors.New("Implementation error") ErrUnknown = errors.New("Unspecified") ) -func (e baseError) Message() string { return e.Error() } -func (e baseError) Reason() error { return e.reason } -func (e baseError) Class() error { return e.class } -func (e baseError) Trace() string { return "" } -func (e baseError) Path() string { return "" } +func (e baseError) Error() string { return e.Message() } +func (e baseError) Reason() error { return e.reason } +func (e baseError) Class() error { return e.class } +func (e baseError) Trace() string { return "" } +func (e baseError) Path() string { return e.path } -func (e baseError) Error() string { +func (e baseError) Message() string { if e.message == "" { return e.reason.Error() } return e.message } +func (e criticalError) Trace() string { return e.trace } + func (e criticalError) Error() string { - return fmt.Sprintf("%s\nTrace:%v\n", e.baseError, e.trace) + return fmt.Sprintf("%s\nTrace:%v\n", e.baseError.Message(), e.trace) } func raiseErr(reason error, message string) Error { @@ -126,21 +128,14 @@ func raiseCritical(reason error, message string) Error { message = fmt.Sprintf("(assert) %v", message) } return criticalError{ - baseError{reason, ErrImplementation, message}, + baseError{reason, ErrImplementation, message, ""}, string(debug.Stack()), } } func raisePathErr(reason error, meta *Meta, message, path string) Error { - // fmt.Printf("path err, reason='%v', meta=%v, message='%v', path='%v'\n", reason, meta, message, path) message = messagePath(reason, meta, message, path) - // fmt.Printf(" -> report message: %v\n", message) - - return pathError{ - baseError{reason, ErrConfig, message}, - meta, - path, - } + return baseError{reason, ErrConfig, message, path} } func messageMeta(message string, meta *Meta) string { @@ -171,7 +166,11 @@ func raiseDuplicateKey(cfg *Config, name string) Error { func raiseMissing(c *Config, field string) Error { // error reading field from config, as missing in c - return raisePathErr(ErrMissing, c.metadata, "", c.PathOf(field, ".")) + return raiseMissingMsg(c, field, "") +} + +func raiseMissingMsg(c *Config, field string, message string) Error { + return raisePathErr(ErrMissing, c.metadata, message, c.PathOf(field, ".")) } func raiseMissingArr(ctx context, meta *Meta, idx int) Error { @@ -187,11 +186,11 @@ func raiseIndexOutOfBounds(opts *options, value value, idx int) Error { return raisePathErr(reason, value.meta(), message, ctx.path(".")) } -func raiseInvalidTopLevelType(v interface{}) Error { - // most likely developers fault +func raiseInvalidTopLevelType(v interface{}, meta *Meta) Error { + // could be developers or user fault t := chaseTypePointers(chaseValue(reflect.ValueOf(v)).Type()) - message := fmt.Sprintf("can not use go type '%v' for merging/unpacking configurations", t) - return raiseCritical(ErrTypeMismatch, message) + message := fmt.Sprintf("type '%v' is not supported on top level of config, only dictionary or list", t) + 
return raiseErr(ErrTypeMismatch, messageMeta(message, meta)) } func raiseKeyInvalidTypeUnpack(t reflect.Type, from *Config) Error { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/flag/util.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/flag/util.go index ae1efa8f..07cc5eab 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/flag/util.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/flag/util.go @@ -35,7 +35,7 @@ func (v *FlagValue) String() string { if v.collector == nil { return "" } - + return toString(v.Config(), v.collector.GetOptions(), v.onError) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/flag/value.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/flag/value.go index ddb4bc35..2eeaf35c 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/flag/value.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/flag/value.go @@ -2,10 +2,10 @@ package flag import ( "fmt" - "strconv" "strings" "github.com/elastic/go-ucfg" + "github.com/elastic/go-ucfg/internal/parse" ) // NewFlagKeyValue implements the flag.Value interface for @@ -32,6 +32,7 @@ func NewFlagKeyValue(cfg *ucfg.Config, autoBool bool, opts ...ucfg.Option) *Flag return newFlagValue(cfg, opts, func(arg string) (*ucfg.Config, error, error) { var key string var val interface{} + var err error args := strings.SplitN(arg, "=", 2) if len(args) < 2 { @@ -44,7 +45,10 @@ func NewFlagKeyValue(cfg *ucfg.Config, autoBool bool, opts ...ucfg.Option) *Flag val = true } else { key = args[0] - val = parseCLIValue(args[1]) + val, err = parse.Value(args[1]) + if err != nil { + return nil, err, err + } } tmp := map[string]interface{}{key: val} @@ -52,31 +56,3 @@ func NewFlagKeyValue(cfg *ucfg.Config, autoBool bool, opts ...ucfg.Option) *Flag return cfg, err, err }) } - -func parseCLIValue(value string) interface{} { - if b, ok := parseBoolValue(value); ok { - return b - } - - if n, err := strconv.ParseUint(value, 0, 64); err == nil { - return n - } - if n, err := strconv.ParseInt(value, 0, 64); err == nil { - return n - } - if n, err := strconv.ParseFloat(value, 64); err == nil { - return n - } - - return value -} - -func parseBoolValue(str string) (value bool, ok bool) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "on", "ON": - return true, true - case "0", "f", "F", "false", "FALSE", "False", "off", "OFF": - return false, true - } - return false, false -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/getset.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/getset.go index e41db69a..518fa2c8 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/getset.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/getset.go @@ -1,7 +1,7 @@ package ucfg // ****************************************************************************** -// Low level getters and setters (do we actually need this?) +// Low level getters and setters // ****************************************************************************** func convertErr(opts *options, v value, err error, to string) Error { @@ -11,8 +11,12 @@ func convertErr(opts *options, v value, err error, to string) Error { return raiseConversion(opts, v, err, to) } -// number of elements for this field. 
If config value is a list, returns number -// of elements in list +// CountField returns number of entries in a table or 1 if entry is a primitive value. +// Primitives settings can be handled like a list with 1 entry. +// +// If name is empty, the total number of top-level settings is returned. +// +// CountField supports the options: PathSep, Env, Resolve, ResolveEnv func (c *Config) CountField(name string, opts ...Option) (int, error) { if name == "" { return len(c.fields.array()) + len(c.fields.dict()), nil @@ -24,6 +28,16 @@ func (c *Config) CountField(name string, opts ...Option) (int, error) { return -1, raiseMissing(c, name) } +// Bool reads a boolean setting returning an error if the setting has no +// boolean value. +// +// The setting path is constructed from name and idx. If name is set and idx is -1, +// only the name is used to access the setting by name. If name is empty, idx +// must be >= 0, assuming the Config is a list. If both name and idx are set, +// the name must point to a list. The number of entries in a named list can be read +// using CountField. +// +// Bool supports the options: PathSep, Env, Resolve, ResolveEnv func (c *Config) Bool(name string, idx int, opts ...Option) (bool, error) { O := makeOptions(opts) v, err := c.getField(name, idx, O) @@ -34,6 +48,16 @@ func (c *Config) Bool(name string, idx int, opts ...Option) (bool, error) { return b, convertErr(O, v, fail, "bool") } +// Strings reads a string setting returning an error if the setting has +// no string or primitive value convertible to string. +// +// The setting path is constructed from name and idx. If name is set and idx is -1, +// only the name is used to access the setting by name. If name is empty, idx +// must be >= 0, assuming the Config is a list. If both name and idx are set, +// the name must point to a list. The number of entries in a named list can be read +// using CountField. +// +// String supports the options: PathSep, Env, Resolve, ResolveEnv func (c *Config) String(name string, idx int, opts ...Option) (string, error) { O := makeOptions(opts) v, err := c.getField(name, idx, O) @@ -44,16 +68,39 @@ func (c *Config) String(name string, idx int, opts ...Option) (string, error) { return s, convertErr(O, v, fail, "string") } +// Int reads an int64 value returning an error if the setting is +// not integer value, the primitive value is not convertible to int or a conversion +// would create an integer overflow. +// +// The setting path is constructed from name and idx. If name is set and idx is -1, +// only the name is used to access the setting by name. If name is empty, idx +// must be >= 0, assuming the Config is a list. If both name and idx are set, +// the name must point to a list. The number of entries in a named list can be read +// using CountField. +// +// Int supports the options: PathSep, Env, Resolve, ResolveEnv func (c *Config) Int(name string, idx int, opts ...Option) (int64, error) { O := makeOptions(opts) v, err := c.getField(name, idx, O) if err != nil { return 0, err } + i, fail := v.toInt(O) return i, convertErr(O, v, fail, "int") } +// Uint reads an uint64 value returning an error if the setting is +// not unsigned value, the primitive value is not convertible to uint64 or a conversion +// would create an integer overflow. +// +// The setting path is constructed from name and idx. If name is set and idx is -1, +// only the name is used to access the setting by name. If name is empty, idx +// must be >= 0, assuming the Config is a list. 
If both name and idx are set, +// the name must point to a list. The number of entries in a named list can be read +// using CountField. +// +// Uint supports the options: PathSep, Env, Resolve, ResolveEnv func (c *Config) Uint(name string, idx int, opts ...Option) (uint64, error) { O := makeOptions(opts) v, err := c.getField(name, idx, O) @@ -64,6 +111,16 @@ func (c *Config) Uint(name string, idx int, opts ...Option) (uint64, error) { return u, convertErr(O, v, fail, "uint") } +// Float reads a float64 value returning an error if the setting is +// not a float value or the primitive value is not convertible to float. +// +// The setting path is constructed from name and idx. If name is set and idx is -1, +// only the name is used to access the setting by name. If name is empty, idx +// must be >= 0, assuming the Config is a list. If both name and idx are set, +// the name must point to a list. The number of entries in a named list can be read +// using CountField. +// +// Float supports the options: PathSep, Env, Resolve, ResolveEnv func (c *Config) Float(name string, idx int, opts ...Option) (float64, error) { O := makeOptions(opts) v, err := c.getField(name, idx, O) @@ -74,6 +131,16 @@ func (c *Config) Float(name string, idx int, opts ...Option) (float64, error) { return f, convertErr(O, v, fail, "float") } +// Child returns a child configuration or an error if the setting requested is a +// primitive value only. +// +// The setting path is constructed from name and idx. If name is set and idx is -1, +// only the name is used to access the setting by name. If name is empty, idx +// must be >= 0, assuming the Config is a list. If both name and idx are set, +// the name must point to a list. The number of entries in a named list can be read +// using CountField. +// +// Child supports the options: PathSep, Env, Resolve, ResolveEnv func (c *Config) Child(name string, idx int, opts ...Option) (*Config, error) { O := makeOptions(opts) v, err := c.getField(name, idx, O) @@ -84,30 +151,89 @@ func (c *Config) Child(name string, idx int, opts ...Option) (*Config, error) { return c, convertErr(O, v, fail, "object") } +// SetBool sets a boolean primitive value. An error is returned if the new name +// is invalid. +// +// The setting path is constructed from name and idx. If name is set and idx is -1, +// only the name is used to access the setting by name. If name is empty, idx +// must be >= 0, assuming the Config is a list. If both name and idx are set, +// the name must point to a list. The number of entries in a named list can be read +// using CountField. +// +// SetBool supports the options: PathSep, MetaData func (c *Config) SetBool(name string, idx int, value bool, opts ...Option) error { return c.setField(name, idx, &cfgBool{b: value}, opts) } +// SetInt sets an integer primitive value. An error is returned if the new +// name is invalid. +// +// The setting path is constructed from name and idx. If name is set and idx is -1, +// only the name is used to access the setting by name. If name is empty, idx +// must be >= 0, assuming the Config is a list. If both name and idx are set, +// the name must point to a list. The number of entries in a named list can be read +// using CountField. +// +// SetInt supports the options: PathSep, MetaData func (c *Config) SetInt(name string, idx int, value int64, opts ...Option) error { return c.setField(name, idx, &cfgInt{i: value}, opts) } +// SetUint sets an unsigned integer primitive value. An error is returned if +// the name is invalid. 
+// +// The setting path is constructed from name and idx. If name is set and idx is -1, +// only the name is used to access the setting by name. If name is empty, idx +// must be >= 0, assuming the Config is a list. If both name and idx are set, +// the name must point to a list. The number of entries in a named list can be read +// using CountField. +// +// SetUint supports the options: PathSep, MetaData func (c *Config) SetUint(name string, idx int, value uint64, opts ...Option) error { return c.setField(name, idx, &cfgUint{u: value}, opts) } +// SetFloat sets an floating point primitive value. An error is returned if +// the name is invalid. +// +// The setting path is constructed from name and idx. If name is set and idx is -1, +// only the name is used to access the setting by name. If name is empty, idx +// must be >= 0, assuming the Config is a list. If both name and idx are set, +// the name must point to a list. The number of entries in a named list can be read +// using CountField. +// +// SetFloat supports the options: PathSep, MetaData func (c *Config) SetFloat(name string, idx int, value float64, opts ...Option) error { return c.setField(name, idx, &cfgFloat{f: value}, opts) } +// SetString sets string value. An error is returned if the name is invalid. +// +// The setting path is constructed from name and idx. If name is set and idx is -1, +// only the name is used to access the setting by name. If name is empty, idx +// must be >= 0, assuming the Config is a list. If both name and idx are set, +// the name must point to a list. The number of entries in a named list can be read +// using CountField. +// +// SetString supports the options: PathSep, MetaData func (c *Config) SetString(name string, idx int, value string, opts ...Option) error { return c.setField(name, idx, &cfgString{s: value}, opts) } +// SetChild adds a sub-configuration. An error is returned if the name is invalid. +// +// The setting path is constructed from name and idx. If name is set and idx is -1, +// only the name is used to access the setting by name. If name is empty, idx +// must be >= 0, assuming the Config is a list. If both name and idx are set, +// the name must point to a list. The number of entries in a named list can be read +// using CountField. 
+// +// SetChild supports the options: PathSep, MetaData func (c *Config) SetChild(name string, idx int, value *Config, opts ...Option) error { return c.setField(name, idx, cfgSub{c: value}, opts) } +// getField supports the options: PathSep, Env, Resolve, ResolveEnv func (c *Config) getField(name string, idx int, opts *options) (value, Error) { p := parsePathIdx(name, opts.pathSep, idx) v, err := p.GetValue(c, opts) @@ -121,6 +247,7 @@ func (c *Config) getField(name string, idx int, opts *options) (value, Error) { return v, nil } +// setField supports the options: PathSep, MetaData func (c *Config) setField(name string, idx int, v value, options []Option) Error { opts := makeOptions(options) p := parsePathIdx(name, opts.pathSep, idx) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/internal/parse/parse.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/internal/parse/parse.go new file mode 100644 index 00000000..53909828 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/internal/parse/parse.go @@ -0,0 +1,305 @@ +package parse + +import ( + "errors" + "fmt" + "strconv" + "strings" + "unicode" +) + +type flagParser struct { + input string +} + +// stopSet definitions for handling unquoted strings +const ( + toplevelStopSet = "," + arrayElemStopSet = ",]" + objKeyStopSet = ":" + objValueStopSet = ",}" +) + +// Value parses command line arguments, supporting +// boolean, numbers, strings, arrays, objects. +// +// The parser implements a superset of JSON, but only a subset of YAML by +// allowing for arrays and objects having a trailing comma. In addition 3 +// strings types are supported: +// +// 1. single quoted string (no unescaping of any characters) +// 2. double quoted strings (characters are escaped) +// 3. strings without quotes. String parsing stops in +// special characters like '[]{},:' +// +// In addition, top-level values can be separated by ',' to build arrays +// without having to use []. 
+func Value(content string) (interface{}, error) { + p := &flagParser{strings.TrimSpace(content)} + v, err := p.parse() + if err != nil { + return nil, fmt.Errorf("%v when parsing '%v'", err.Error(), content) + } + return v, nil +} + +func (p *flagParser) parse() (interface{}, error) { + var values []interface{} + + for { + v, err := p.parseValue(toplevelStopSet) + if err != nil { + return nil, err + } + values = append(values, v) + + p.ignoreWhitespace() + if p.input == "" { + break + } + + if err := p.expectChar(','); err != nil { + return nil, err + } + } + + switch len(values) { + case 0: + return nil, nil + case 1: + return values[0], nil + } + return values, nil +} + +func (p *flagParser) parseValue(stopSet string) (interface{}, error) { + p.ignoreWhitespace() + in := p.input + + if in == "" { + return nil, nil + } + + switch in[0] { + case '[': + return p.parseArray() + case '{': + return p.parseObj() + case '"': + return p.parseStringDQuote() + case '\'': + return p.parseStringSQuote() + default: + return p.parsePrimitive(stopSet) + } +} + +func (p *flagParser) ignoreWhitespace() { + p.input = strings.TrimLeftFunc(p.input, unicode.IsSpace) +} + +func (p *flagParser) parseArray() (interface{}, error) { + p.input = p.input[1:] + + var values []interface{} +loop: + for { + p.ignoreWhitespace() + if p.input[0] == ']' { + p.input = p.input[1:] + break + } + + v, err := p.parseValue(arrayElemStopSet) + if err != nil { + return nil, err + } + values = append(values, v) + + p.ignoreWhitespace() + if p.input == "" { + return nil, errors.New("array closing ']' missing") + } + + next := p.input[0] + p.input = p.input[1:] + + switch next { + case ']': + break loop + case ',': + continue + default: + return nil, errors.New("array expected ',' or ']'") + } + + } + + if len(values) == 0 { + return nil, nil + } + + return values, nil +} + +func (p *flagParser) parseObj() (interface{}, error) { + p.input = p.input[1:] + + O := map[string]interface{}{} + +loop: + for { + p.ignoreWhitespace() + if p.input[0] == '}' { + p.input = p.input[1:] + break + } + + k, err := p.parseKey() + if err != nil { + return nil, err + } + + p.ignoreWhitespace() + if err := p.expectChar(':'); err != nil { + return nil, err + } + + v, err := p.parseValue(objValueStopSet) + if err != nil { + return nil, err + } + + if p.input == "" { + return nil, errors.New("dictionary expected ',' or '}'") + } + + O[k] = v + next := p.input[0] + p.input = p.input[1:] + + switch next { + case '}': + break loop + case ',': + continue + default: + return nil, errors.New("dictionary expected ',' or '}'") + } + } + + // empty object + if len(O) == 0 { + return nil, nil + } + + return O, nil +} + +func (p *flagParser) parseKey() (string, error) { + in := p.input + if in == "" { + return "", errors.New("expected key") + } + + switch in[0] { + case '"': + return p.parseStringDQuote() + case '\'': + return p.parseStringSQuote() + default: + return p.parseNonQuotedString(objKeyStopSet) + } +} + +func (p *flagParser) parseStringDQuote() (string, error) { + in := p.input + off := 1 + var i int + for { + i = strings.IndexByte(in[off:], '"') + if i < 0 { + return "", errors.New("Missing \" to close string ") + } + + i += off + if in[i-1] != '\\' { + break + } + off = i + 1 + } + + p.input = in[i+1:] + return strconv.Unquote(in[:i+1]) +} + +func (p *flagParser) parseStringSQuote() (string, error) { + in := p.input + i := strings.IndexByte(in[1:], '\'') + if i < 0 { + return "", errors.New("missing ' to close string") + } + + p.input = in[i+2:] + 
return in[1 : 1+i], nil +} + +func (p *flagParser) parseNonQuotedString(stopSet string) (string, error) { + in := p.input + idx := strings.IndexAny(in, stopSet) + if idx == 0 { + return "", fmt.Errorf("unexpected '%v'", string(in[idx])) + } + + content, in := in, "" + if idx > 0 { + content, in = content[:idx], content[idx:] + } + p.input = in + + return strings.TrimSpace(content), nil +} + +func (p *flagParser) parsePrimitive(stopSet string) (interface{}, error) { + content, err := p.parseNonQuotedString(stopSet) + if err != nil { + return nil, err + } + + if content == "null" { + return nil, nil + } + if b, ok := parseBoolValue(content); ok { + return b, nil + } + if n, err := strconv.ParseUint(content, 0, 64); err == nil { + return n, nil + } + if n, err := strconv.ParseInt(content, 0, 64); err == nil { + return n, nil + } + if n, err := strconv.ParseFloat(content, 64); err == nil { + return n, nil + } + + return content, nil +} + +func (p *flagParser) expectChar(c byte) error { + if p.input == "" || p.input[0] != c { + return fmt.Errorf("expected '%v'", string(c)) + } + + p.input = p.input[1:] + return nil +} + +func parseBoolValue(str string) (value bool, ok bool) { + switch str { + case "t", "T", "true", "TRUE", "True", "on", "ON": + return true, true + case "f", "F", "false", "FALSE", "False", "off", "OFF": + return false, true + } + return false, false +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/json/json.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/json/json.go index 96de03ab..2841103b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/json/json.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/json/json.go @@ -8,7 +8,7 @@ import ( ) func NewConfig(in []byte, opts ...ucfg.Option) (*ucfg.Config, error) { - var m map[string]interface{} + var m interface{} if err := json.Unmarshal(in, &m); err != nil { return nil, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/merge.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/merge.go index b1851c78..34d149fa 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/merge.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/merge.go @@ -7,7 +7,52 @@ import ( "time" ) +// Merge a map, a slice, a struct or another Config object into c. +// +// Merge traverses the value from recursively copying all values into a hierarchy +// of Config objects plus primitives into c. +// +// Merge supports the options: PathSep, MetaData, StructTag, VarExp +// +// Merge uses the type-dependent default encodings: +// - Boolean values are encoded as booleans. +// - Integer are encoded as int64 values, unsigned integer values as uint64 and +// floats as float64 values. +// - Strings are copied into string values. +// If the VarExp is set, string fields will be parsed into +// variable expansion expressions. The expression can reference any +// other setting by absolute name. +// - Array and slices are copied into new Config objects with index accessors only. +// - Struct values and maps with key type string are encoded as Config objects with +// named field accessors. +// - Config objects will be copied and added to the current hierarchy. 
+// +// The `config` struct tag (configurable via StructTag option) can be used to +// set the field name and enable additional merging settings per field: +// +// // field appears in Config as key "myName" +// Field int `config:"myName"` +// +// // field appears in sub-Config "mySub" as key "myName" (requires PathSep(".")) +// Field int `config:"mySub.myName"` +// +// // field is processed as if keys are part of outer struct (type can be a +// // struct, a slice, an array, a map or of type *Config) +// Field map[string]interface{} `config:",inline"` +// +// +// Returns an error if merging fails to normalize and validate the from value. +// If duplicate setting names are detected in the input, merging fails as well. +// +// Config cannot represent cyclic structures and Merge does not handle them +// well. Passing cyclic structures to Merge will result in an infinite recursive +// loop. func (c *Config) Merge(from interface{}, options ...Option) error { + // from is empty in case of empty config file + if from == nil { + return nil + } + opts := makeOptions(options) other, err := normalize(opts, from) @@ -31,27 +76,13 @@ func mergeConfigDict(opts *options, to, from *Config) Error { field: k, } - old, ok := to.fields.get(k) - if !ok { - to.fields.set(k, v.cpy(ctx)) - continue - } - - subOld, err := old.toConfig(opts) + old, _ := to.fields.get(k) + merged, err := mergeValues(opts, old, v) if err != nil { - to.fields.set(k, v.cpy(ctx)) - continue - } - - subFrom, err := v.toConfig(opts) - if err != nil { - to.fields.set(k, v.cpy(ctx)) - continue - } - - if err := mergeConfig(opts, subOld, subFrom); err != nil { return err } + + to.fields.set(k, merged.cpy(ctx)) } return nil } @@ -69,23 +100,13 @@ func mergeConfigArr(opts *options, to, from *Config) Error { field: fmt.Sprintf("%v", i), } - v := from.fields.array()[i] - old := to.fields.array()[i] - subOld, err := old.toConfig(opts) + v := from.fields.array()[i] + merged, err := mergeValues(opts, old, v) if err != nil { - to.fields.setAt(i, cfgSub{to}, v.cpy(ctx)) - continue - } - - subFrom, err := v.toConfig(opts) - if err != nil { - to.fields.setAt(i, cfgSub{to}, v.cpy(ctx)) - } - - if err := mergeConfig(opts, subOld, subFrom); err != nil { return err } + to.fields.setAt(i, cfgSub{to}, merged.cpy(ctx)) } end := len(from.fields.array()) @@ -106,6 +127,30 @@ func mergeConfigArr(opts *options, to, from *Config) Error { return nil } +func mergeValues(opts *options, old, v value) (value, Error) { + if old == nil { + return v, nil + } + + // check if new and old value evaluate to sub-configurations. If one is no + // sub-configuration, use new value only. 
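For illustration, a minimal sketch of Merge and the `config` struct tag described above used together; the connection struct, its field names and the example settings are assumptions, not part of the patch:

package main

import (
	"fmt"

	"github.com/elastic/go-ucfg"
)

type connection struct {
	Host  string                 `config:"host"`
	Port  int                    `config:"port"`
	Extra map[string]interface{} `config:",inline"` // keys merged into the outer object
}

func main() {
	cfg := ucfg.New()

	// Merge a plain map first ...
	if err := cfg.Merge(map[string]interface{}{"host": "localhost", "port": 5601}); err != nil {
		panic(err)
	}

	// ... then merge a struct on top of it; both end up in the same hierarchy.
	c := connection{Host: "example.org", Port: 9200, Extra: map[string]interface{}{"timeout": "30s"}}
	if err := cfg.Merge(c); err != nil {
		panic(err)
	}

	fmt.Println(cfg.GetFields()) // e.g. [host port timeout] (order is not guaranteed)
}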
+ subOld, err := old.toConfig(opts) + if err != nil { + return v, nil + } + subV, err := v.toConfig(opts) + if err != nil { + return v, nil + } + + // merge new and old evaluated sub-configurations and return subOld for + // reassigning to old key in case of subOld being generated dynamically + if err := mergeConfig(opts, subOld, subV); err != nil { + return nil, err + } + return cfgSub{subOld}, nil +} + // convert from into normalized *Config checking for errors // before merging generated(normalized) config with current config func normalize(opts *options, from interface{}) (*Config, Error) { @@ -128,11 +173,18 @@ func normalize(opts *options, from interface{}) (*Config, Error) { return normalizeStruct(opts, vFrom) case reflect.Map: return normalizeMap(opts, vFrom) + case reflect.Array, reflect.Slice: + tmp, err := normalizeArray(opts, tagOptions{}, context{}, vFrom) + if err != nil { + return nil, err + } + c, _ := tmp.toConfig(opts) + return c, nil } } - return nil, raiseInvalidTopLevelType(from) + return nil, raiseInvalidTopLevelType(from, opts.meta) } func normalizeMap(opts *options, from reflect.Value) (*Config, Error) { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/opts.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/opts.go index 514a480f..c2ecd9fe 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/opts.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/opts.go @@ -2,6 +2,8 @@ package ucfg import "os" +// Option type implementing additional options to be passed +// to go-ucfg library functions. type Option func(*options) type options struct { @@ -12,64 +14,122 @@ type options struct { env []*Config resolvers []func(name string) (string, error) varexp bool + + // temporary cache of parsed splice values for lifetime of call to + // Unpack/Pack/Get/... + parsed valueCache } +type valueCache map[string]spliceValue + +// id used to store intermediate parse results in current execution context. +// As parsing results might differ between multiple calls due to: +// splice being shared between multiple configurations, or environment +// changing between calls + lazy nature of cfgSplice, parsing results cannot +// be stored in cfgSplice itself. +type cacheID string + +type spliceValue struct { + err error + value value +} + +// StructTag option sets the struct tag name to use for looking up +// field names and options in `Unpack` and `Merge`. +// The default struct tag in `config`. func StructTag(tag string) Option { return func(o *options) { o.tag = tag } } +// ValidatorTag option sets the struct tag name used to set validators +// on struct fields in `Unpack`. +// The default struct tag in `validate`. func ValidatorTag(tag string) Option { return func(o *options) { o.validatorTag = tag } } +// PathSep sets the path separator used to split up names into a tree like hierarchy. +// If PathSep is not set, field names will not be split. func PathSep(sep string) Option { return func(o *options) { o.pathSep = sep } } +// MetaData option passes additional metadata (currently only source of the +// configuration) to be stored internally (e.g. for error reporting). func MetaData(meta Meta) Option { return func(o *options) { o.meta = &meta } } +// Env option adds another configuration for variable expansion to be used, if +// the path to look up does not exist in the actual configuration. Env can be used +// multiple times in order to add more lookup environments. 
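A short sketch of the PathSep and MetaData options in use; the dotted setting name and the source file name are assumptions:

package main

import (
	"fmt"

	"github.com/elastic/go-ucfg"
)

func main() {
	cfg, err := ucfg.NewFrom(
		map[string]interface{}{"output.elasticsearch.hosts": []string{"localhost:9200"}},
		ucfg.PathSep("."),                            // split dotted keys into a nested hierarchy
		ucfg.MetaData(ucfg.Meta{Source: "demo.yml"}), // remembered for error messages
	)
	if err != nil {
		panic(err)
	}

	// Because of PathSep("."), "output" exists as a nested sub-config.
	fmt.Println(cfg.HasField("output")) // true
}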
func Env(e *Config) Option { return func(o *options) { o.env = append(o.env, e) } } +// Resolve option adds a callback used by variable name expansion. The callback +// will be called if a variable can not be resolved from within the actual configuration +// or any of its environments. func Resolve(fn func(name string) (string, error)) Option { return func(o *options) { o.resolvers = append(o.resolvers, fn) } } -var ResolveEnv = Resolve(func(name string) (string, error) { - value := os.Getenv(name) - if value == "" { - return "", ErrMissing - } - return value, nil -}) +// ResolveEnv option adds a look up callback looking up values in the available +// OS environment variables. +var ResolveEnv Option = doResolveEnv -var VarExp Option = func(o *options) { - o.varexp = true +func doResolveEnv(o *options) { + o.resolvers = append(o.resolvers, func(name string) (string, error) { + value := os.Getenv(name) + if value == "" { + return "", ErrMissing + } + return value, nil + }) } +// VarExp option enables support for variable expansion. Resolve and Env options will only be effective if VarExp is set. +var VarExp Option = doVarExp + +func doVarExp(o *options) { o.varexp = true } + func makeOptions(opts []Option) *options { o := options{ tag: "config", validatorTag: "validate", pathSep: "", // no separator by default + parsed: map[string]spliceValue{}, } for _, opt := range opts { opt(&o) } return &o } + +func (cache valueCache) cachedValue( + id cacheID, + f func() (value, error), +) (value, error) { + if v, ok := cache[string(id)]; ok { + if v.err != nil { + return nil, v.err + } + return v.value, nil + } + + v, err := f() + cache[string(id)] = spliceValue{err, v} + return v, err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/reify.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/reify.go index 65a4a44e..493f0da2 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/reify.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/reify.go @@ -6,6 +6,96 @@ import ( "time" ) +// Unpack unpacks c into a struct, a map, or a slice allocating maps, slices, +// and pointers as necessary. +// +// Unpack supports the options: PathSep, StructTag, ValidatorTag, Env, Resolve, +// ResolveEnv. +// +// When unpacking into a value, Unpack first will try to call Unpack if the +// value implements the Unpacker interface. Otherwise, Unpack tries to convert +// the internal value into the target type: +// +// # Primitive types +// +// bool: requires setting of type bool or string which parses into a +// boolean value (true, false, on, off) +// int(8, 16, 32, 64): requires any number type convertible to int or a string +// parsing to int. Fails if the target value would overflow. +// uint(8, 16, 32, 64): requires any number type convertible to int or a string +// parsing to int. Fails if the target value is negative or would overflow. +// float(32, 64): requires any number type convertible to float or a string +// parsing to float. Fails if the target value is negative or would overflow. +// string: requires any primitive value which is serialized into a string. +// +// # Special types: +// +// time.Duration: requires a number setting converted to seconds or a string +// parsed into time.Duration via time.ParseDuration. +// *regexp.Regexp: requires a string being compiled into a regular expression +// using regexp.Compile. +// *Config: requires a Config object to be stored by pointer into the target +// value. 
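The VarExp, Env and Resolve options above combine roughly as sketched below; the setting names, the ${port} and ${HOME_DIR} references and the resolver values are assumptions. The same options are passed to both NewFrom and Unpack, since references are resolved lazily when a value is accessed:

package main

import (
	"fmt"

	"github.com/elastic/go-ucfg"
)

func main() {
	envCfg, _ := ucfg.NewFrom(map[string]interface{}{"port": 5601})

	opts := []ucfg.Option{
		ucfg.VarExp,      // enable ${...} expansion when strings are parsed
		ucfg.Env(envCfg), // fall back to another Config for unknown references
		ucfg.Resolve(func(name string) (string, error) { // custom resolver callback
			if name == "HOME_DIR" {
				return "/var/lib/demo", nil
			}
			return "", ucfg.ErrMissing
		}),
		ucfg.ResolveEnv, // finally, look up OS environment variables
	}

	cfg, err := ucfg.NewFrom(map[string]interface{}{
		"host": "localhost:${port}",
		"path": "${HOME_DIR}/data",
	}, opts...)
	if err != nil {
		panic(err)
	}

	var out struct {
		Host string `config:"host"`
		Path string `config:"path"`
	}
	if err := cfg.Unpack(&out, opts...); err != nil {
		panic(err)
	}
	fmt.Println(out.Host, out.Path) // localhost:5601 /var/lib/demo/data
}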
Can be used to capture a sub-Config without interpreting +// the settings yet. +// +// # Arrays/Slices: +// +// Requires a Config object with indexed entries. Named entries will not be +// unpacked into the Array/Slice. Primitive values will be handled like arrays +// of length 1. +// +// # Map +// +// Requires a Config object with all named top-level entries being unpacked into +// the map. +// +// # Struct +// +// Requires a Config object. All named values in the Config object will be unpacked +// into the struct its fields, if the name is available in the struct. +// A field its name is set using the `config` struct tag (configured by StructTag) +// If tag is missing or no field name is configured in the tag, the field name +// itself will be used. +// +// +// Fields available in a struct or a map, but not in the Config object, will not +// be touched. Default values should be set in the target value before calling Unpack. +// +// Type aliases like "type myTypeAlias T" are unpacked using Unpack if the alias +// implements the Unpacker interface. Otherwise unpacking rules for type T will be used. +// +// When unpacking a value, the Validate method will be called if the value +// implements the Validator interface. Unpacking a struct field the validator +// options will be applied to the unpacked value as well. +// +// Struct field validators are set using the `validate` tag (configurable by +// ValidatorTag). Default validators options are: +// +// required: check value is set and not empty +// nonzero: check numeric value != 0 or string/slice not being empty +// positive: check numeric value >= 0 +// min=: check numeric value >= . If target type is time.Duration, +// can be a duration. +// max=: check numeric value <= . If target type is time.Duration, +// can be a duration. +// +// If a config value is not the convertible to the target type, or overflows the +// target type, Unpack will abort immediately and return the appropriate error. +// +// If validator tags or validation provided by Validate or Unmarshal fails, +// Unpack will abort immediately and return the validate error. 
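A sketch of Unpack together with the validator tags listed above; the output struct, its defaults and the example settings are assumptions:

package main

import (
	"fmt"
	"time"

	"github.com/elastic/go-ucfg"
)

type output struct {
	Hosts   []string      `config:"hosts"   validate:"required"`
	Workers int           `config:"workers" validate:"min=1,max=64"`
	Timeout time.Duration `config:"timeout" validate:"min=1s"`
}

func main() {
	cfg, err := ucfg.NewFrom(map[string]interface{}{
		"hosts":   []string{"localhost:9200"},
		"workers": 4,
		"timeout": "30s",
	})
	if err != nil {
		panic(err)
	}

	// Defaults are set on the target before unpacking; settings missing from
	// the config keep their defaults.
	out := output{Workers: 1, Timeout: 5 * time.Second}
	if err := cfg.Unpack(&out); err != nil {
		panic(err) // a failing validator (e.g. workers: 0) would surface here
	}
	fmt.Printf("%+v\n", out)
}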
+// +// When unpacking into an interface{} value, Unpack will store a value of one of +// these types in the value: +// +// bool for boolean values +// int64 for signed integer values +// uint64 for unsigned integer values +// float64 for floating point values +// string for string values +// []interface{} for list-only Config objects +// map[string]interface{} for Config objects +// nil for pointers if key has a nil value func (c *Config) Unpack(to interface{}, options ...Option) error { opts := makeOptions(options) @@ -52,7 +142,7 @@ func reifyInto(opts *options, to reflect.Value, from *Config) Error { return nil } - return raiseInvalidTopLevelType(to.Interface()) + return raiseInvalidTopLevelType(to.Interface(), opts.meta) } func reifyMap(opts *options, to reflect.Value, from *Config) Error { @@ -98,13 +188,9 @@ func reifyStruct(opts *options, orig reflect.Value, cfg *Config) Error { to.Set(orig) } - if v, ok := implementsUnpacker(to); ok { - reified, err := cfgSub{cfg}.reify(opts) + if v, ok := valueIsUnpacker(to); ok { + err := unpackWith(opts, v, cfgSub{cfg}) if err != nil { - return raisePathErr(err, cfg.metadata, "", cfg.Path(".")) - } - - if err := unpackWith(cfg.ctx, cfg.metadata, v, reified); err != nil { return err } } else { @@ -273,6 +359,15 @@ func reifyMergeValue( } baseType := chaseTypePointers(old.Type()) + + if v, ok := valueIsUnpacker(old); ok { + err := unpackWith(opts.opts, v, val) + if err != nil { + return reflect.Value{}, err + } + return old, nil + } + if tConfig.ConvertibleTo(baseType) { sub, err := val.toConfig(opts.opts) if err != nil { @@ -328,18 +423,6 @@ func reifyMergeValue( return reifySlice(opts, baseType, val) } - if v, ok := implementsUnpacker(old); ok { - reified, err := val.reify(opts.opts) - if err != nil { - ctx := val.Context() - return reflect.Value{}, raisePathErr(err, val.meta(), "", ctx.path(".")) - } - - if err := unpackWith(val.Context(), val.meta(), v, reified); err != nil { - return reflect.Value{}, err - } - return old, nil - } return reifyPrimitive(opts, val, t, baseType) } @@ -400,10 +483,10 @@ func castArr(opts *options, v value) ([]value, Error) { if sub, ok := v.(cfgSub); ok { return sub.c.fields.array(), nil } - if ref, ok := v.(*cfgRef); ok { - unrefed, err := ref.resolve(opts) + if ref, ok := v.(*cfgDynamic); ok { + unrefed, err := ref.getValue(opts) if err != nil { - return nil, raiseMissing(ref.ctx.getParent(), ref.ref.String()) + return nil, raiseMissingMsg(ref.ctx.getParent(), ref.ctx.field, err.Error()) } if sub, ok := unrefed.(cfgSub); ok { @@ -439,13 +522,8 @@ func reifyPrimitive( var ok bool if v, ok = typeIsUnpacker(baseType); ok { - reified, err := val.reify(opts.opts) + err := unpackWith(opts.opts, v, val) if err != nil { - ctx := val.Context() - return reflect.Value{}, raisePathErr(err, val.meta(), "", ctx.path(".")) - } - - if err := unpackWith(val.Context(), val.meta(), v, reified); err != nil { return reflect.Value{}, err } } else { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/testdata/error/message/invalid_type_top_level_w_meta.golden b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/testdata/error/message/invalid_type_top_level_w_meta.golden new file mode 100644 index 00000000..4c1dfdaf --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/testdata/error/message/invalid_type_top_level_w_meta.golden @@ -0,0 +1 @@ +type 'string' is not supported on top level of config, only dictionary or list (source:'test.source') \ No newline at end 
of file diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/testdata/error/message/invalid_type_top_level_wo_meta.golden b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/testdata/error/message/invalid_type_top_level_wo_meta.golden new file mode 100644 index 00000000..2325f15e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/testdata/error/message/invalid_type_top_level_wo_meta.golden @@ -0,0 +1 @@ +type 'string' is not supported on top level of config, only dictionary or list \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/testdata/error/message/missing_msg_nested_w_meta.golden b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/testdata/error/message/missing_msg_nested_w_meta.golden new file mode 100644 index 00000000..98011576 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/testdata/error/message/missing_msg_nested_w_meta.golden @@ -0,0 +1 @@ +custom error message accessing 'nested.field' (source:'test.source') \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/testdata/error/message/missing_msg_nested_wo_meta.golden b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/testdata/error/message/missing_msg_nested_wo_meta.golden new file mode 100644 index 00000000..333f107e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/testdata/error/message/missing_msg_nested_wo_meta.golden @@ -0,0 +1 @@ +custom error message accessing 'nested.field' \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/testdata/error/message/missing_msg_w_meta.golden b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/testdata/error/message/missing_msg_w_meta.golden new file mode 100644 index 00000000..a18bcf74 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/testdata/error/message/missing_msg_w_meta.golden @@ -0,0 +1 @@ +custom error message accessing 'field' (source:'test.source') \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/testdata/error/message/missing_msg_wo_meta.golden b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/testdata/error/message/missing_msg_wo_meta.golden new file mode 100644 index 00000000..ebbd13c9 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/testdata/error/message/missing_msg_wo_meta.golden @@ -0,0 +1 @@ +custom error message accessing 'field' \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/types.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/types.go index dd96a811..3bb26681 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/types.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/types.go @@ -5,6 +5,11 @@ import ( "math" "reflect" "strconv" + "strings" + "sync/atomic" + + "github.com/elastic/go-ucfg/internal/parse" + uuid "github.com/satori/go.uuid" ) type value interface { @@ -70,16 +75,6 @@ type cfgSub struct { c *Config } -type cfgRef struct { - cfgPrimitive - ref *reference -} - -type cfgSplice struct { - cfgPrimitive - splice varEvaler -} - type cfgNil struct{ cfgPrimitive } type cfgPrimitive struct { @@ -87,6 +82,25 @@ type cfgPrimitive 
struct { metadata *Meta } +type cfgDynamic struct { + cfgPrimitive + id cacheID + dyn dynValue +} + +type dynValue interface { + getValue(p *cfgPrimitive, opts *options) (value, error) + String() string +} + +type refDynValue reference + +type spliceDynValue struct { + e varEvaler +} + +var spliceSeq int32 + func (c *context) empty() bool { return c.parent == nil } @@ -144,12 +158,17 @@ func newString(ctx context, m *Meta, s string) *cfgString { return &cfgString{cfgPrimitive{ctx, m}, s} } -func newRef(ctx context, m *Meta, ref *reference) *cfgRef { - return &cfgRef{cfgPrimitive{ctx, m}, ref} +func newRef(ctx context, m *Meta, ref *reference) *cfgDynamic { + return newDyn(ctx, m, (*refDynValue)(ref)) } -func newSplice(ctx context, m *Meta, s varEvaler) *cfgSplice { - return &cfgSplice{cfgPrimitive{ctx, m}, s} +func newSplice(ctx context, m *Meta, s varEvaler) *cfgDynamic { + return newDyn(ctx, m, spliceDynValue{s}) +} + +func newDyn(ctx context, m *Meta, val dynValue) *cfgDynamic { + id := string(atomic.AddInt32(&spliceSeq, 1)) + uuid.NewV4().String() + return &cfgDynamic{cfgPrimitive{ctx, m}, cacheID(id), val} } func (p *cfgPrimitive) Context() context { return p.ctx } @@ -268,6 +287,7 @@ func (c cfgSub) toConfig(*options) (*Config, error) { return c.c, nil } func (c cfgSub) Len(*options) (int, error) { arr := c.c.fields.array() if arr != nil { + return len(arr), nil } @@ -366,148 +386,165 @@ func (c cfgSub) reify(opts *options) (interface{}, error) { } } -func (r *cfgRef) typ(opt *options) (typeInfo, error) { - v, err := r.resolve(opt) - if err != nil { - return typeInfo{}, err +func (d *cfgDynamic) typ(opts *options) (ti typeInfo, err error) { + d.withValue(&err, opts, func(v value) { + ti, err = v.typ(opts) + }) + return +} + +func (d *cfgDynamic) cpy(c context) value { + return newDyn(c, d.meta(), d.dyn) +} + +func (d *cfgDynamic) Len(opts *options) (l int, err error) { + d.withValue(&err, opts, func(v value) { + l, err = v.Len(opts) + }) + return +} + +func (d *cfgDynamic) reflect(opts *options) (rv reflect.Value, err error) { + d.withValue(&err, opts, func(v value) { + rv, err = v.reflect(opts) + }) + return +} + +func (d *cfgDynamic) reify(opts *options) (rv interface{}, err error) { + d.withValue(&err, opts, func(v value) { + rv, err = v.reify(opts) + }) + return +} + +func (d *cfgDynamic) toBool(opts *options) (b bool, err error) { + d.withValue(&err, opts, func(v value) { + b, err = v.toBool(opts) + }) + return +} + +func (d *cfgDynamic) toString(opts *options) (s string, err error) { + d.withValue(&err, opts, func(v value) { + s, err = v.toString(opts) + }) + return +} + +func (d *cfgDynamic) toInt(opts *options) (i int64, err error) { + d.withValue(&err, opts, func(v value) { + i, err = v.toInt(opts) + }) + return +} + +func (d *cfgDynamic) toUint(opts *options) (u uint64, err error) { + d.withValue(&err, opts, func(v value) { + u, err = v.toUint(opts) + }) + return +} + +func (d *cfgDynamic) toFloat(opts *options) (f float64, err error) { + d.withValue(&err, opts, func(v value) { + f, err = v.toFloat(opts) + }) + return +} + +func (d *cfgDynamic) toConfig(opts *options) (cfg *Config, err error) { + d.withValue(&err, opts, func(v value) { + cfg, err = v.toConfig(opts) + }) + return +} + +func (d *cfgDynamic) withValue(err *error, opts *options, fn func(value)) { + var v value + if v, *err = d.getValue(opts); *err == nil { + fn(v) } - return v.typ(opt) } -func (r *cfgRef) cpy(ctx context) value { - return newRef(ctx, r.meta(), r.ref) +func (d *cfgDynamic) getValue(opts 
*options) (value, error) { + return opts.parsed.cachedValue(d.id, func() (value, error) { + return d.dyn.getValue(&d.cfgPrimitive, opts) + }) } -func (r *cfgRef) Len(opt *options) (int, error) { - v, err := r.resolve(opt) - if err != nil { - return 0, err +func (r *refDynValue) String() string { + ref := (*reference)(r) + return ref.String() +} + +func (r *refDynValue) getValue( + p *cfgPrimitive, + opts *options, +) (value, error) { + ref := (*reference)(r) + v, err := ref.resolveRef(p.ctx.getParent(), opts) + if v != nil || err != nil { + return v, err } - return v.Len(opt) -} -func (r *cfgRef) reflect(opts *options) (reflect.Value, error) { - v, err := r.resolve(opts) - if err != nil { - return reflect.Value{}, err - } - return v.reflect(opts) -} - -func (r *cfgRef) reify(opts *options) (interface{}, error) { - v, err := r.resolve(opts) - if err != nil { - return reflect.Value{}, err - } - return v.reify(opts) -} - -func (r *cfgRef) toBool(opts *options) (bool, error) { - v, err := r.resolve(opts) - if err != nil { - return false, err - } - return v.toBool(opts) -} - -func (r *cfgRef) toString(opts *options) (string, error) { - v, err := r.resolve(opts) - if err != nil { - return "", err - } - return v.toString(opts) -} - -func (r *cfgRef) toInt(opts *options) (int64, error) { - v, err := r.resolve(opts) - if err != nil { - return 0, err - } - return v.toInt(opts) -} - -func (r *cfgRef) toUint(opts *options) (uint64, error) { - v, err := r.resolve(opts) - if err != nil { - return 0, err - } - return v.toUint(opts) -} - -func (r *cfgRef) toFloat(opts *options) (float64, error) { - v, err := r.resolve(opts) - if err != nil { - return 0, err - } - return v.toFloat(opts) -} - -func (r *cfgRef) toConfig(opts *options) (*Config, error) { - v, err := r.resolve(opts) + str, err := ref.resolveEnv(p.ctx.getParent(), opts) if err != nil { return nil, err } - return v.toConfig(opts) + return parseValue(p, opts, str) } -func (r *cfgRef) resolve(opts *options) (value, error) { - return r.ref.resolve(r.ctx.getParent(), opts) -} - -func (*cfgSplice) typ(*options) (typeInfo, error) { - return typeInfo{"string", tString}, nil -} - -func (s *cfgSplice) cpy(ctx context) value { - return newSplice(ctx, s.meta(), s.splice) -} - -func (s *cfgSplice) reflect(opt *options) (reflect.Value, error) { - str, err := s.toString(opt) +func (s spliceDynValue) getValue( + p *cfgPrimitive, + opts *options, +) (value, error) { + splice := s.e + str, err := splice.eval(p.ctx.getParent(), opts) if err != nil { - return reflect.Value{}, err + return nil, err } - return reflect.ValueOf(str), err + + return parseValue(p, opts, str) } -func (s *cfgSplice) reify(opt *options) (interface{}, error) { - return s.toString(opt) +func (s spliceDynValue) String() string { + return "" } -func (s *cfgSplice) toBool(opt *options) (bool, error) { - str, err := s.toString(opt) +func parseValue(p *cfgPrimitive, opts *options, str string) (value, error) { + ifc, err := parse.Value(str) if err != nil { - return false, err + return nil, err } - return strconv.ParseBool(str) -} -func (s *cfgSplice) toString(opts *options) (string, error) { - return s.splice.eval(s.ctx.getParent(), opts) -} + if ifc == nil { + if strings.TrimSpace(str) == "" { + return newString(p.ctx, p.meta(), str), nil + } + return &cfgNil{cfgPrimitive{ctx: p.ctx, metadata: p.meta()}}, nil + } -func (s *cfgSplice) toInt(opt *options) (int64, error) { - str, err := s.toString(opt) + switch v := ifc.(type) { + case bool: + return newBool(p.ctx, p.meta(), v), nil + case int64: + 
return newInt(p.ctx, p.meta(), v), nil + case uint64: + return newUint(p.ctx, p.meta(), v), nil + case float64: + return newFloat(p.ctx, p.meta(), v), nil + case string: + return newString(p.ctx, p.meta(), v), nil + } + + sub, err := normalize(opts, ifc) if err != nil { - return 0, err + return nil, err } - return strconv.ParseInt(str, 0, 64) -} - -func (s *cfgSplice) toUint(opt *options) (uint64, error) { - str, err := s.toString(opt) - if err != nil { - return 0, err - } - return strconv.ParseUint(str, 0, 64) -} - -func (s *cfgSplice) toFloat(opt *options) (float64, error) { - str, err := s.toString(opt) - if err != nil { - return 0, err - } - return strconv.ParseFloat(str, 64) + sub.ctx = p.ctx + sub.metadata = p.metadata + return cfgSub{sub}, nil } func isNil(v value) bool { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/ucfg.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/ucfg.go index 9972f20f..e959d5c0 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/ucfg.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/ucfg.go @@ -7,6 +7,15 @@ import ( "time" ) +// Config object to store hierarchical configurations into. Config can be +// both a dictionary and a list holding primitive values. Primitive values +// can be booleans, integers, float point numbers and strings. +// +// Config provides a low level interface for setting and getting settings +// via SetBool, SetInt, SetUing, SetFloat, SetString, SetChild, Bool, Int, Uint, +// Float, String, and Child. +// +// A more user-friendly high level interface is provided via Unpack and Merge. type Config struct { ctx context metadata *Meta @@ -24,15 +33,11 @@ type fields struct { a []value } -// Meta holds additional meta data per config value +// Meta holds additional meta data per config value. type Meta struct { Source string } -type Unpacker interface { - Unpack(interface{}) error -} - var ( tConfig = reflect.TypeOf(Config{}) tConfigPtr = reflect.PtrTo(tConfig) @@ -40,7 +45,6 @@ var ( tInterfaceArray = reflect.TypeOf([]interface{}(nil)) // interface types - tUnpacker = reflect.TypeOf((*Unpacker)(nil)).Elem() tValidator = reflect.TypeOf((*Validator)(nil)).Elem() // primitives @@ -53,12 +57,17 @@ var ( tRegexp = reflect.TypeOf(regexp.Regexp{}) ) +// New creates a new empty Config object. func New() *Config { return &Config{ fields: &fields{nil, nil}, } } +// NewFrom creates a new config object normalizing and copying from into the new +// Config object. NewFrom uses Merge to copy from. +// +// NewFrom supports the options: PathSep, MetaData, StructTag, VarExp func NewFrom(from interface{}, opts ...Option) (*Config, error) { c := New() if err := c.Merge(from, opts...); err != nil { @@ -67,14 +76,17 @@ func NewFrom(from interface{}, opts ...Option) (*Config, error) { return c, nil } +// IsDict checks if c has named keys. func (c *Config) IsDict() bool { return c.fields.dict() != nil } +// IsArray checks if c has index only accessible settings. func (c *Config) IsArray() bool { return c.fields.array() != nil } +// GetFields returns a list of all top-level named keys in c. func (c *Config) GetFields() []string { var names []string for k := range c.fields.dict() { @@ -83,19 +95,26 @@ func (c *Config) GetFields() []string { return names } +// HasField checks if c has a top-level named key name. func (c *Config) HasField(name string) bool { _, ok := c.fields.get(name) return ok } +// Path gets the absolute path of c separated by sep. 
If c is a root-Config an +// empty string will be returned. func (c *Config) Path(sep string) string { return c.ctx.path(sep) } +// PathOf gets the absolute path of a potential setting field in c with name +// separated by sep. func (c *Config) PathOf(field, sep string) string { return c.ctx.pathOf(field, sep) } +// Parent returns the parent configuration or nil if c is already a root +// Configuration. func (c *Config) Parent() *Config { ctx := c.ctx for { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/unpack.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/unpack.go new file mode 100644 index 00000000..da859700 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/unpack.go @@ -0,0 +1,165 @@ +package ucfg + +import "reflect" + +// Unpacker type used by Unpack to allow types to implement custom configuration +// unpacking. +type Unpacker interface { + // Unpack is called if a setting of field has a type implementing Unpacker. + // + // The interface{} value passed to Unpack can be of type: bool, int64, uint64, + // float64, string, []interface{} or map[string]interface{}. + Unpack(interface{}) error +} + +// BoolUnpacker interface specializes the Unpacker interface +// by casting values to bool when calling Unpack. +type BoolUnpacker interface { + Unpack(b bool) error +} + +// IntUnpacker interface specializes the Unpacker interface +// by casting values to int64 when calling Unpack. +type IntUnpacker interface { + Unpack(i int64) error +} + +// UintUnpacker interface specializes the Unpacker interface +// by casting values to uint64 when calling Unpack. +type UintUnpacker interface { + Unpack(u uint64) error +} + +// FloatUnpacker interface specializes the Unpacker interface +// by casting values to float64 when calling Unpack. +type FloatUnpacker interface { + Unpack(f float64) error +} + +// StringUnpacker interface specializes the Unpacker interface +// by casting values to string when calling Unpack. +type StringUnpacker interface { + Unpack(s string) error +} + +// ConfigUnpacker interface specializes the Unpacker interface +// by passing the the *Config object directly instead of +// transforming the *Config object into map[string]interface{}. +type ConfigUnpacker interface { + Unpack(c *Config) error +} + +var ( + // unpacker interface types + tUnpacker = reflect.TypeOf((*Unpacker)(nil)).Elem() + tBoolUnpacker = reflect.TypeOf((*BoolUnpacker)(nil)).Elem() + tIntUnpacker = reflect.TypeOf((*IntUnpacker)(nil)).Elem() + tUintUnpacker = reflect.TypeOf((*UintUnpacker)(nil)).Elem() + tFloatUnpacker = reflect.TypeOf((*FloatUnpacker)(nil)).Elem() + tStringUnpacker = reflect.TypeOf((*StringUnpacker)(nil)).Elem() + tConfigUnpacker = reflect.TypeOf((*ConfigUnpacker)(nil)).Elem() + + tUnpackers = [...]reflect.Type{ + tUnpacker, + tBoolUnpacker, + tIntUnpacker, + tUintUnpacker, + tFloatUnpacker, + tStringUnpacker, + tConfigUnpacker, + } +) + +// valueIsUnpacker checks if v implements the Unpacker interface. +// If there exists a pointer to v, the pointer to v is also tested. 
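For illustration, a hedged sketch of a custom type hooking into Unpack through one of the specialized interfaces above; the logLevel type and the "level" setting are assumptions:

package main

import (
	"fmt"
	"strings"

	"github.com/elastic/go-ucfg"
)

// logLevel satisfies StringUnpacker: config values are handed over as strings.
type logLevel int

const (
	infoLevel logLevel = iota
	debugLevel
)

func (l *logLevel) Unpack(s string) error {
	switch strings.ToLower(s) {
	case "info":
		*l = infoLevel
	case "debug":
		*l = debugLevel
	default:
		return fmt.Errorf("unknown log level: %v", s)
	}
	return nil
}

func main() {
	cfg, _ := ucfg.NewFrom(map[string]interface{}{"level": "debug"})

	settings := struct {
		Level logLevel `config:"level"`
	}{}
	if err := cfg.Unpack(&settings); err != nil {
		panic(err) // an unknown level is reported together with its config path
	}
	fmt.Println(settings.Level == debugLevel) // true
}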
+func valueIsUnpacker(v reflect.Value) (reflect.Value, bool) { + for { + if implementsUnpacker(v.Type()) { + return v, true + } + + if !v.CanAddr() { + break + } + v = v.Addr() + } + + return reflect.Value{}, false +} + +func typeIsUnpacker(t reflect.Type) (reflect.Value, bool) { + if implementsUnpacker(t) { + return reflect.New(t).Elem(), true + } + + if implementsUnpacker(reflect.PtrTo(t)) { + return reflect.New(t), true + } + + return reflect.Value{}, false +} + +func implementsUnpacker(t reflect.Type) bool { + for _, tUnpack := range tUnpackers { + if t.Implements(tUnpack) { + return true + } + } + + return false +} + +func unpackWith(opts *options, v reflect.Value, with value) Error { + ctx := with.Context() + meta := with.meta() + + var err error + switch u := v.Interface().(type) { + case Unpacker: + var reified interface{} + if reified, err = with.reify(opts); err == nil { + err = u.Unpack(reified) + } + + case BoolUnpacker: + var b bool + if b, err = with.toBool(opts); err == nil { + err = u.Unpack(b) + } + + case IntUnpacker: + var n int64 + if n, err = with.toInt(opts); err == nil { + err = u.Unpack(n) + } + + case UintUnpacker: + var n uint64 + if n, err = with.toUint(opts); err == nil { + err = u.Unpack(n) + } + + case FloatUnpacker: + var f float64 + if f, err = with.toFloat(opts); err == nil { + err = u.Unpack(f) + } + + case StringUnpacker: + var s string + if s, err = with.toString(opts); err == nil { + err = u.Unpack(s) + } + + case ConfigUnpacker: + var c *Config + if c, err = with.toConfig(opts); err == nil { + err = u.Unpack(c) + } + } + + if err != nil { + return raisePathErr(err, meta, "", ctx.path(".")) + } + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/util.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/util.go index cb80d83d..664317ec 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/util.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/util.go @@ -131,37 +131,3 @@ func isFloat(k reflect.Kind) bool { return false } } - -func implementsUnpacker(v reflect.Value) (reflect.Value, bool) { - for { - if v.Type().Implements(tUnpacker) { - return v, true - } - - if !v.CanAddr() { - break - } - v = v.Addr() - } - return reflect.Value{}, false -} - -func typeIsUnpacker(t reflect.Type) (reflect.Value, bool) { - if t.Implements(tUnpacker) { - return reflect.New(t).Elem(), true - } - - if reflect.PtrTo(t).Implements(tUnpacker) { - return reflect.New(t), true - } - - return reflect.Value{}, false -} - -func unpackWith(ctx context, meta *Meta, v reflect.Value, with interface{}) Error { - err := v.Interface().(Unpacker).Unpack(with) - if err != nil { - return raisePathErr(err, meta, "", ctx.path(".")) - } - return nil -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/validator.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/validator.go index d1adb54e..e8bb96cb 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/validator.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/validator.go @@ -9,10 +9,19 @@ import ( "time" ) +// Validator interface provides additional validation support to Unpack. The +// Validate method will be executed for any type passed directly or indirectly to +// Unpack. 
+// +// If Validate fails with an error message, Unpack will add some +// context - like setting being accessed and file setting was read from - to the +// error message before returning the actual error. type Validator interface { Validate() error } +// ValidatorCallback is the type of optional validator tags to be registered via +// RegisterValidator. type ValidatorCallback func(interface{}, string) error type validatorTag struct { @@ -26,13 +35,21 @@ var ( ) func init() { - RegisterValidator("nonzero", validateNonZero) - RegisterValidator("positive", validatePositive) - RegisterValidator("min", validateMin) - RegisterValidator("max", validateMax) - RegisterValidator("required", validateRequired) + initRegisterValidator("nonzero", validateNonZero) + initRegisterValidator("positive", validatePositive) + initRegisterValidator("min", validateMin) + initRegisterValidator("max", validateMax) + initRegisterValidator("required", validateRequired) } +func initRegisterValidator(name string, cb ValidatorCallback) { + if err := RegisterValidator(name, cb); err != nil { + panic("Duplicate validator: " + name) + } +} + +// RegisterValidator adds a new validator option to the "validate" struct tag. +// The callback will be executed when unpacking into a struct field. func RegisterValidator(name string, cb ValidatorCallback) error { if _, exists := validators[name]; exists { return ErrDuplicateValidator diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/variables.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/variables.go index b82d0d77..b8037686 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/variables.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/variables.go @@ -87,7 +87,7 @@ func (r *reference) String() string { return fmt.Sprintf("${%v}", r.Path) } -func (r *reference) resolve(cfg *Config, opts *options) (value, error) { +func (r *reference) resolveRef(cfg *Config, opts *options) (value, error) { env := opts.env var err error @@ -114,7 +114,12 @@ func (r *reference) resolve(cfg *Config, opts *options) (value, error) { env = env[:len(env)-1] } - // try callbacks + return nil, err +} + +func (r *reference) resolveEnv(cfg *Config, opts *options) (string, error) { + var err error + if len(opts.resolvers) > 0 { key := r.Path.String() for i := len(opts.resolvers) - 1; i >= 0; i-- { @@ -122,12 +127,26 @@ func (r *reference) resolve(cfg *Config, opts *options) (value, error) { resolver := opts.resolvers[i] v, err = resolver(key) if err == nil { - return newString(context{field: key}, nil, v), nil + return v, nil } } } - return nil, err + return "", err +} + +func (r *reference) resolve(cfg *Config, opts *options) (value, error) { + v, err := r.resolveRef(cfg, opts) + if v != nil || err != nil { + return v, err + } + + s, err := r.resolveEnv(cfg, opts) + if s == "" || err != nil { + return nil, err + } + + return newString(context{field: r.Path.String()}, nil, s), nil } func (r *reference) eval(cfg *Config, opts *options) (string, error) { @@ -390,9 +409,7 @@ func lexer(in string) (<-chan token, <-chan error) { } func parseVarExp(lex <-chan token, pathSep string) (varEvaler, error) { - stack := []parseState{ - parseState{st: stLeft}, - } + stack := []parseState{{st: stLeft}} // parser loop for tok := range lex { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/yaml/yaml.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/yaml/yaml.go index 
d5449e0e..f5eaefeb 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/yaml/yaml.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-ucfg/yaml/yaml.go @@ -3,15 +3,17 @@ package yaml import ( "io/ioutil" - "github.com/elastic/go-ucfg" "gopkg.in/yaml.v2" + + "github.com/elastic/go-ucfg" ) func NewConfig(in []byte, opts ...ucfg.Option) (*ucfg.Config, error) { - var m map[string]interface{} + var m interface{} if err := yaml.Unmarshal(in, &m); err != nil { return nil, err } + return ucfg.NewFrom(m, opts...) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/CHANGELOG.md b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/CHANGELOG.md index 780680ee..3cc18f0c 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/CHANGELOG.md +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/CHANGELOG.md @@ -11,6 +11,7 @@ This project adheres to [Semantic Versioning](http://semver.org/). - Added `Swap` implementation for Windows based on page file metrics. #55 - Added support to `github.com/gosigar/sys/windows` for querying and enabling privileges in a process token. +- Added utility code for interfacing with linux NETLINK_INET_DIAG. #60 ### Changed - Changed several `OpenProcess` calls on Windows to request the lowest possible diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/inetdiag.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/inetdiag.go new file mode 100644 index 00000000..a2851d23 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/inetdiag.go @@ -0,0 +1,376 @@ +// +build linux + +package linux + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/fnv" + "io" + "net" + "os" + "syscall" + "unsafe" + + "github.com/pkg/errors" +) + +// Enums / Constants + +const ( + // AllTCPStates is a flag to request all sockets in any TCP state. + AllTCPStates = ^uint32(0) + + // TCPDIAG_GETSOCK is the netlink message type for requesting TCP diag data. + // https://github.com/torvalds/linux/blob/v4.0/include/uapi/linux/inet_diag.h#L7 + TCPDIAG_GETSOCK = 18 + + // SOCK_DIAG_BY_FAMILY is the netlink message type for requestion socket + // diag data by family. This is newer and can be used with inet_diag_req_v2. + // https://github.com/torvalds/linux/blob/v4.0/include/uapi/linux/sock_diag.h#L6 + SOCK_DIAG_BY_FAMILY = 20 +) + +// AddressFamily is the address family of the socket. +type AddressFamily uint8 + +// https://github.com/torvalds/linux/blob/5924bbecd0267d87c24110cbe2041b5075173a25/include/linux/socket.h#L159 +const ( + AF_INET AddressFamily = 2 + AF_INET6 = 10 +) + +var addressFamilyNames = map[AddressFamily]string{ + AF_INET: "ipv4", + AF_INET6: "ipv6", +} + +func (af AddressFamily) String() string { + if fam, found := addressFamilyNames[af]; found { + return fam + } + return fmt.Sprintf("UNKNOWN (%d)", af) +} + +// TCPState represents the state of a TCP connection. 
+type TCPState uint8 + +// https://github.com/torvalds/linux/blob/5924bbecd0267d87c24110cbe2041b5075173a25/include/net/tcp_states.h#L16 +const ( + TCP_ESTABLISHED TCPState = iota + 1 + TCP_SYN_SENT + TCP_SYN_RECV + TCP_FIN_WAIT1 + TCP_FIN_WAIT2 + TCP_TIME_WAIT + TCP_CLOSE + TCP_CLOSE_WAIT + TCP_LAST_ACK + TCP_LISTEN + TCP_CLOSING /* Now a valid state */ +) + +var tcpStateNames = map[TCPState]string{ + TCP_ESTABLISHED: "ESTAB", + TCP_SYN_SENT: "SYN-SENT", + TCP_SYN_RECV: "SYN-RECV", + TCP_FIN_WAIT1: "FIN-WAIT-1", + TCP_FIN_WAIT2: "FIN-WAIT-2", + TCP_TIME_WAIT: "TIME-WAIT", + TCP_CLOSE: "UNCONN", + TCP_CLOSE_WAIT: "CLOSE-WAIT", + TCP_LAST_ACK: "LAST-ACK", + TCP_LISTEN: "LISTEN", + TCP_CLOSING: "CLOSING", +} + +func (s TCPState) String() string { + if state, found := tcpStateNames[s]; found { + return state + } + return "UNKNOWN" +} + +// Extensions that can be used in the InetDiagReqV2 request to ask for +// additional data. +// https://github.com/torvalds/linux/blob/v4.0/include/uapi/linux/inet_diag.h#L103 +const ( + INET_DIAG_NONE = 0 + INET_DIAG_MEMINFO = 1 << iota + INET_DIAG_INFO + INET_DIAG_VEGASINFO + INET_DIAG_CONG + INET_DIAG_TOS + INET_DIAG_TCLASS + INET_DIAG_SKMEMINFO + INET_DIAG_SHUTDOWN + INET_DIAG_DCTCPINFO + INET_DIAG_PROTOCOL /* response attribute only */ + INET_DIAG_SKV6ONLY + INET_DIAG_LOCALS + INET_DIAG_PEERS + INET_DIAG_PAD + INET_DIAG_MARK +) + +// NetlinkInetDiag sends the given netlink request parses the responses with the +// assumption that they are inet_diag_msgs. This will allocate a temporary +// buffer for reading from the socket whose size will be the length of a page +// (usually 32k). Use NetlinkInetDiagWithBuf if you want to provide your own +// buffer. +func NetlinkInetDiag(request syscall.NetlinkMessage) ([]*InetDiagMsg, error) { + return NetlinkInetDiagWithBuf(request, nil, nil) +} + +// NetlinkInetDiagWithBuf sends the given netlink request parses the responses +// with the assumption that they are inet_diag_msgs. readBuf will be used to +// hold the raw data read from the socket. If the length is not large enough to +// hold the socket contents the data will be truncated. If readBuf is nil then a +// temporary buffer will be allocated for each invocation. The resp writer, if +// non-nil, will receive a copy of all bytes read (this is useful for +// debugging). +func NetlinkInetDiagWithBuf(request syscall.NetlinkMessage, readBuf []byte, resp io.Writer) ([]*InetDiagMsg, error) { + s, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, syscall.NETLINK_INET_DIAG) + if err != nil { + return nil, err + } + defer syscall.Close(s) + + lsa := &syscall.SockaddrNetlink{Family: syscall.AF_NETLINK} + if err := syscall.Sendto(s, serialize(request), 0, lsa); err != nil { + return nil, err + } + + if len(readBuf) == 0 { + // Default size used in libnl. + readBuf = make([]byte, os.Getpagesize()) + } + + var inetDiagMsgs []*InetDiagMsg +done: + for { + buf := readBuf + nr, _, err := syscall.Recvfrom(s, buf, 0) + if err != nil { + return nil, err + } + if nr < syscall.NLMSG_HDRLEN { + return nil, syscall.EINVAL + } + + buf = buf[:nr] + + // Dump raw data for inspection purposes. 
+ if resp != nil { + if _, err := resp.Write(buf); err != nil { + return nil, err + } + } + + msgs, err := syscall.ParseNetlinkMessage(buf) + if err != nil { + return nil, err + } + + for _, m := range msgs { + if m.Header.Type == syscall.NLMSG_DONE { + break done + } + if m.Header.Type == syscall.NLMSG_ERROR { + return nil, ParseNetlinkError(m.Data) + } + + inetDiagMsg, err := ParseInetDiagMsg(m.Data) + if err != nil { + return nil, err + } + inetDiagMsgs = append(inetDiagMsgs, inetDiagMsg) + } + } + return inetDiagMsgs, nil +} + +func serialize(msg syscall.NetlinkMessage) []byte { + msg.Header.Len = uint32(syscall.SizeofNlMsghdr + len(msg.Data)) + b := make([]byte, msg.Header.Len) + binary.LittleEndian.PutUint32(b[0:4], msg.Header.Len) + binary.LittleEndian.PutUint16(b[4:6], msg.Header.Type) + binary.LittleEndian.PutUint16(b[6:8], msg.Header.Flags) + binary.LittleEndian.PutUint32(b[8:12], msg.Header.Seq) + binary.LittleEndian.PutUint32(b[12:16], msg.Header.Pid) + copy(b[16:], msg.Data) + return b +} + +// Request messages. + +var sizeofInetDiagReq = int(unsafe.Sizeof(InetDiagReq{})) + +// InetDiagReq (inet_diag_req) is used to request diagnostic data from older +// kernels. +// https://github.com/torvalds/linux/blob/v4.0/include/uapi/linux/inet_diag.h#L25 +type InetDiagReq struct { + Family uint8 + SrcLen uint8 + DstLen uint8 + Ext uint8 + ID InetDiagSockID + States uint32 // States to dump. + DBs uint32 // Tables to dump. +} + +func (r InetDiagReq) toWireFormat() []byte { + buf := bytes.NewBuffer(make([]byte, sizeofInetDiagReq)) + buf.Reset() + if err := binary.Write(buf, binary.LittleEndian, r); err != nil { + // This never returns an error. + panic(err) + } + return buf.Bytes() +} + +// NewInetDiagReq returns a new NetlinkMessage whose payload is an InetDiagReq. +// Callers should set their own sequence number in the returned message header. +func NewInetDiagReq() syscall.NetlinkMessage { + hdr := syscall.NlMsghdr{ + Type: uint16(TCPDIAG_GETSOCK), + Flags: uint16(syscall.NLM_F_DUMP | syscall.NLM_F_REQUEST), + Pid: uint32(0), + } + req := InetDiagReq{ + Family: uint8(AF_INET), // This returns both ipv4 and ipv6. + States: AllTCPStates, + } + + return syscall.NetlinkMessage{Header: hdr, Data: req.toWireFormat()} +} + +// V2 Request + +var sizeofInetDiagReqV2 = int(unsafe.Sizeof(InetDiagReqV2{})) + +// InetDiagReqV2 (inet_diag_req_v2) is used to request diagnostic data. +// https://github.com/torvalds/linux/blob/v4.0/include/uapi/linux/inet_diag.h#L37 +type InetDiagReqV2 struct { + Family uint8 + Protocol uint8 + Ext uint8 + Pad uint8 + States uint32 + ID InetDiagSockID +} + +func (r InetDiagReqV2) toWireFormat() []byte { + buf := bytes.NewBuffer(make([]byte, sizeofInetDiagReqV2)) + buf.Reset() + if err := binary.Write(buf, binary.LittleEndian, r); err != nil { + // This never returns an error. + panic(err) + } + return buf.Bytes() +} + +// NewInetDiagReqV2 returns a new NetlinkMessage whose payload is an +// InetDiagReqV2. Callers should set their own sequence number in the returned +// message header. +func NewInetDiagReqV2(af AddressFamily) syscall.NetlinkMessage { + hdr := syscall.NlMsghdr{ + Type: uint16(SOCK_DIAG_BY_FAMILY), + Flags: uint16(syscall.NLM_F_DUMP | syscall.NLM_F_REQUEST), + Pid: uint32(0), + } + req := InetDiagReqV2{ + Family: uint8(af), + Protocol: syscall.IPPROTO_TCP, + States: AllTCPStates, + } + + return syscall.NetlinkMessage{Header: hdr, Data: req.toWireFormat()} +} + +// Response messages. + +// InetDiagMsg (inet_diag_msg) is the base info structure. 
It contains socket +// identity (addrs/ports/cookie) and the information shown by netstat. +// https://github.com/torvalds/linux/blob/v4.0/include/uapi/linux/inet_diag.h#L86 +type InetDiagMsg struct { + Family uint8 // Address family. + State uint8 // TCP State + Timer uint8 + Retrans uint8 + + ID InetDiagSockID + + Expires uint32 + RQueue uint32 // Recv-Q + WQueue uint32 // Send-Q + UID uint32 // UID + Inode uint32 // Inode of socket. +} + +// ParseInetDiagMsg parse an InetDiagMsg from a byte slice. It assumes the +// InetDiagMsg starts at the beginning of b. Invoke this method to parse the +// payload of a netlink response. +func ParseInetDiagMsg(b []byte) (*InetDiagMsg, error) { + r := bytes.NewReader(b) + inetDiagMsg := &InetDiagMsg{} + err := binary.Read(r, binary.LittleEndian, inetDiagMsg) + if err != nil { + return nil, errors.Wrap(err, "failed to unmarshal inet_diag_msg") + } + return inetDiagMsg, nil +} + +// SrcPort returns the source (local) port. +func (m InetDiagMsg) SrcPort() int { return int(binary.BigEndian.Uint16(m.ID.SPort[:])) } + +// DstPort returns the destination (remote) port. +func (m InetDiagMsg) DstPort() int { return int(binary.BigEndian.Uint16(m.ID.DPort[:])) } + +// SrcIP returns the source (local) IP. +func (m InetDiagMsg) SrcIP() net.IP { return ip(m.ID.Src, AddressFamily(m.Family)) } + +// DstIP returns the destination (remote) IP. +func (m InetDiagMsg) DstIP() net.IP { return ip(m.ID.Dst, AddressFamily(m.Family)) } + +func (m InetDiagMsg) srcIPBytes() []byte { return ipBytes(m.ID.Src, AddressFamily(m.Family)) } +func (m InetDiagMsg) dstIPBytes() []byte { return ipBytes(m.ID.Dst, AddressFamily(m.Family)) } + +func ip(data [16]byte, af AddressFamily) net.IP { + if af == AF_INET { + return net.IPv4(data[0], data[1], data[2], data[3]) + } + return net.IP(data[:]) +} + +func ipBytes(data [16]byte, af AddressFamily) []byte { + if af == AF_INET { + return data[:4] + } + + return data[:] +} + +// FastHash returns a hash calculated using FNV-1 of the source and destination +// addresses. +func (m *InetDiagMsg) FastHash() uint64 { + // Hash using FNV-1 algorithm. + h := fnv.New64() + h.Write(m.srcIPBytes()) // Must trim non-zero garbage from ipv4 buffers. + h.Write(m.dstIPBytes()) + h.Write(m.ID.SPort[:]) + h.Write(m.ID.DPort[:]) + return h.Sum64() +} + +// InetDiagSockID (inet_diag_sockid) contains the socket identity. +// https://github.com/torvalds/linux/blob/v4.0/include/uapi/linux/inet_diag.h#L13 +type InetDiagSockID struct { + SPort [2]byte // Source port (big-endian). + DPort [2]byte // Destination port (big-endian). + Src [16]byte // Source IP + Dst [16]byte // Destination IP + If uint32 + Cookie [2]uint32 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/netlink.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/netlink.go new file mode 100644 index 00000000..3c50f456 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/netlink.go @@ -0,0 +1,107 @@ +package linux + +import ( + "encoding/binary" + "errors" +) + +// ParseNetlinkError parses the errno from the data section of a +// syscall.NetlinkMessage. If netlinkData is less than 4 bytes an error +// describing the problem will be returned. 
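A minimal sketch of how the request/response helpers in this file could be combined to dump TCP sockets (Linux only); the import path assumes the stand-alone gosigar repository, and error handling is reduced to panics:

package main

import (
	"fmt"

	"github.com/elastic/gosigar/sys/linux"
)

func main() {
	// Build a SOCK_DIAG_BY_FAMILY request for all IPv4 TCP sockets ...
	req := linux.NewInetDiagReqV2(linux.AF_INET)

	// ... send it over a NETLINK_INET_DIAG socket and parse the replies.
	msgs, err := linux.NetlinkInetDiag(req)
	if err != nil {
		panic(err)
	}

	for _, m := range msgs {
		fmt.Printf("%v:%d -> %v:%d state=%v inode=%d\n",
			m.SrcIP(), m.SrcPort(), m.DstIP(), m.DstPort(),
			linux.TCPState(m.State), m.Inode)
	}
}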
+func ParseNetlinkError(netlinkData []byte) error { + if len(netlinkData) >= 4 { + errno := -binary.LittleEndian.Uint32(netlinkData[:4]) + return NetlinkErrno(errno) + } + return errors.New("received netlink error (data too short to read errno)") +} + +// NetlinkErrno represent the error code contained in a netlink message of +// type NLMSG_ERROR. +type NetlinkErrno uint32 + +// Netlink error codes. +const ( + NLE_SUCCESS NetlinkErrno = iota + NLE_FAILURE + NLE_INTR + NLE_BAD_SOCK + NLE_AGAIN + NLE_NOMEM + NLE_EXIST + NLE_INVAL + NLE_RANGE + NLE_MSGSIZE + NLE_OPNOTSUPP + NLE_AF_NOSUPPORT + NLE_OBJ_NOTFOUND + NLE_NOATTR + NLE_MISSING_ATTR + NLE_AF_MISMATCH + NLE_SEQ_MISMATCH + NLE_MSG_OVERFLOW + NLE_MSG_TRUNC + NLE_NOADDR + NLE_SRCRT_NOSUPPORT + NLE_MSG_TOOSHORT + NLE_MSGTYPE_NOSUPPORT + NLE_OBJ_MISMATCH + NLE_NOCACHE + NLE_BUSY + NLE_PROTO_MISMATCH + NLE_NOACCESS + NLE_PERM + NLE_PKTLOC_FILE + NLE_PARSE_ERR + NLE_NODEV + NLE_IMMUTABLE + NLE_DUMP_INTR + NLE_ATTRSIZE +) + +// https://github.com/thom311/libnl/blob/libnl3_2_28/lib/error.c +var netlinkErrorMsgs = map[NetlinkErrno]string{ + NLE_SUCCESS: "Success", + NLE_FAILURE: "Unspecific failure", + NLE_INTR: "Interrupted system call", + NLE_BAD_SOCK: "Bad socket", + NLE_AGAIN: "Try again", + NLE_NOMEM: "Out of memory", + NLE_EXIST: "Object exists", + NLE_INVAL: "Invalid input data or parameter", + NLE_RANGE: "Input data out of range", + NLE_MSGSIZE: "Message size not sufficient", + NLE_OPNOTSUPP: "Operation not supported", + NLE_AF_NOSUPPORT: "Address family not supported", + NLE_OBJ_NOTFOUND: "Object not found", + NLE_NOATTR: "Attribute not available", + NLE_MISSING_ATTR: "Missing attribute", + NLE_AF_MISMATCH: "Address family mismatch", + NLE_SEQ_MISMATCH: "Message sequence number mismatch", + NLE_MSG_OVERFLOW: "Kernel reported message overflow", + NLE_MSG_TRUNC: "Kernel reported truncated message", + NLE_NOADDR: "Invalid address for specified address family", + NLE_SRCRT_NOSUPPORT: "Source based routing not supported", + NLE_MSG_TOOSHORT: "Netlink message is too short", + NLE_MSGTYPE_NOSUPPORT: "Netlink message type is not supported", + NLE_OBJ_MISMATCH: "Object type does not match cache", + NLE_NOCACHE: "Unknown or invalid cache type", + NLE_BUSY: "Object busy", + NLE_PROTO_MISMATCH: "Protocol mismatch", + NLE_NOACCESS: "No Access", + NLE_PERM: "Operation not permitted", + NLE_PKTLOC_FILE: "Unable to open packet location file", + NLE_PARSE_ERR: "Unable to parse object", + NLE_NODEV: "No such device", + NLE_IMMUTABLE: "Immutable attribute", + NLE_DUMP_INTR: "Dump inconsistency detected, interrupted", + NLE_ATTRSIZE: "Attribute max length exceeded", +} + +func (e NetlinkErrno) Error() string { + if msg, found := netlinkErrorMsgs[e]; found { + return msg + } + + return netlinkErrorMsgs[NLE_FAILURE] +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/sysconf_cgo.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/sysconf_cgo.go index 411e0ce3..e557a66e 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/sysconf_cgo.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/sysconf_cgo.go @@ -7,6 +7,7 @@ package linux */ import "C" +// GetClockTicks returns the number of click ticks in one jiffie. 
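As a small usage sketch: GetClockTicks is typically used to convert jiffy-based counters (for example utime/stime read from /proc/<pid>/stat) into seconds; the counter value below is an assumption:

package main

import (
	"fmt"

	"github.com/elastic/gosigar/sys/linux"
)

func main() {
	ticksPerSecond := linux.GetClockTicks() // commonly 100 on Linux

	utimeJiffies := uint64(4200) // as it would be read from /proc/<pid>/stat
	seconds := float64(utimeJiffies) / float64(ticksPerSecond)

	fmt.Printf("utime: %.2f seconds\n", seconds)
}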
func GetClockTicks() int { return int(C.sysconf(C._SC_CLK_TCK)) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/sysconf_nocgo.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/sysconf_nocgo.go index 3b789672..005e0c12 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/sysconf_nocgo.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/sysconf_nocgo.go @@ -2,6 +2,7 @@ package linux +// GetClockTicks returns the number of click ticks in one jiffie. func GetClockTicks() int { return 100 } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/AUTHORS.md b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/AUTHORS.md new file mode 100644 index 00000000..0c802dd8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/AUTHORS.md @@ -0,0 +1,20 @@ +The Prometheus project was started by Matt T. Proud (emeritus) and +Julius Volz in 2012. + +Maintainers of this repository: + +* Tobias Schmidt + +The following individuals have contributed code to this repository +(listed in alphabetical order): + +* Armen Baghumian +* Bjoern Rabenstein +* David Cournapeau +* Ji-Hoon, Seol +* Jonas Große Sundrup +* Julius Volz +* Matthias Rampke +* Nicky Gerritsen +* Rémi Audebert +* Tobias Schmidt diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/CONTRIBUTING.md b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/CONTRIBUTING.md new file mode 100644 index 00000000..5705f0fb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull + request, addressing (with `@...`) one or more of the maintainers + (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/NOTICE b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/NOTICE new file mode 100644 index 00000000..53c5e9aa --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/NOTICE @@ -0,0 +1,7 @@ +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/README.md b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/README.md new file mode 100644 index 00000000..6e7ee6b8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/README.md @@ -0,0 +1,10 @@ +# procfs + +This procfs package provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +*WARNING*: This package is a work in progress. Its API may still break in +backwards-incompatible ways without warnings. Use it at your own risk. + +[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) +[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/doc.go new file mode 100644 index 00000000..e2acd6d4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/doc.go @@ -0,0 +1,45 @@ +// Copyright 2014 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. +// +// Example: +// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.NewStat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } +// +package procfs diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/fs.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/fs.go new file mode 100644 index 00000000..49aaab05 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/fs.go @@ -0,0 +1,33 @@ +package procfs + +import ( + "fmt" + "os" + "path" +) + +// FS represents the pseudo-filesystem proc, which provides an interface to +// kernel data structures. 
+type FS string + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = "/proc" + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. +func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %s: %s", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %s is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +// Path returns the path of the given subsystem relative to the procfs root. +func (fs FS) Path(p ...string) string { + return path.Join(append([]string{string(fs)}, p...)...) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/ipvs.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/ipvs.go new file mode 100644 index 00000000..e7012f73 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/ipvs.go @@ -0,0 +1,224 @@ +package procfs + +import ( + "bufio" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" +) + +// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. +type IPVSStats struct { + // Total count of connections. + Connections uint64 + // Total incoming packages processed. + IncomingPackets uint64 + // Total outgoing packages processed. + OutgoingPackets uint64 + // Total incoming traffic. + IncomingBytes uint64 + // Total outgoing traffic. + OutgoingBytes uint64 +} + +// IPVSBackendStatus holds current metrics of one virtual / real address pair. +type IPVSBackendStatus struct { + // The local (virtual) IP address. + LocalAddress net.IP + // The local (virtual) port. + LocalPort uint16 + // The transport protocol (TCP, UDP). + Proto string + // The remote (real) IP address. + RemoteAddress net.IP + // The remote (real) port. + RemotePort uint16 + // The current number of active connections for this virtual/real address pair. + ActiveConn uint64 + // The current number of inactive connections for this virtual/real address pair. + InactConn uint64 + // The current weight of this virtual/real address pair. + Weight uint64 +} + +// NewIPVSStats reads the IPVS statistics. +func NewIPVSStats() (IPVSStats, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return IPVSStats{}, err + } + + return fs.NewIPVSStats() +} + +// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) NewIPVSStats() (IPVSStats, error) { + file, err := os.Open(fs.Path("net/ip_vs_stats")) + if err != nil { + return IPVSStats{}, err + } + defer file.Close() + + return parseIPVSStats(file) +} + +// parseIPVSStats performs the actual parsing of `ip_vs_stats`. 
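Everything in this package hangs off the FS type defined above: NewFS validates the mount point once and Path joins file names below it, while package-level helpers such as NewIPVSStats fall back to DefaultMountPoint. A hedged sketch of reading the IPVS counters through both entry points, with the import path taken from the package's own doc example and the alternative mount point purely hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs" // import path taken from the package's doc example
)

func main() {
	// Package-level helper: reads from the default /proc mount point.
	stats, err := procfs.NewIPVSStats()
	if err != nil {
		log.Fatalf("could not read IPVS stats: %s", err)
	}
	fmt.Printf("connections: %d\n", stats.Connections)

	// Explicit FS: useful when proc is mounted elsewhere (path is hypothetical).
	fs, err := procfs.NewFS("/host/proc")
	if err != nil {
		log.Fatalf("could not open procfs: %s", err)
	}
	stats, err = fs.NewIPVSStats()
	if err != nil {
		log.Fatalf("could not read IPVS stats: %s", err)
	}
	fmt.Printf("bytes in/out: %d/%d\n", stats.IncomingBytes, stats.OutgoingBytes)
}
```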
+func parseIPVSStats(file io.Reader) (IPVSStats, error) { + var ( + statContent []byte + statLines []string + statFields []string + stats IPVSStats + ) + + statContent, err := ioutil.ReadAll(file) + if err != nil { + return IPVSStats{}, err + } + + statLines = strings.SplitN(string(statContent), "\n", 4) + if len(statLines) != 4 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") + } + + statFields = strings.Fields(statLines[2]) + if len(statFields) != 5 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") + } + + stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) + if err != nil { + return IPVSStats{}, err + } + + return stats, nil +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. +func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return []IPVSBackendStatus{}, err + } + + return fs.NewIPVSBackendStatus() +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. +func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.Path("net/ip_vs")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseIPVSBackendStatus(file) +} + +func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { + var ( + status []IPVSBackendStatus + scanner = bufio.NewScanner(file) + proto string + localAddress net.IP + localPort uint16 + err error + ) + + for scanner.Scan() { + fields := strings.Fields(string(scanner.Text())) + if len(fields) == 0 { + continue + } + switch { + case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": + continue + case fields[0] == "TCP" || fields[0] == "UDP": + if len(fields) < 2 { + continue + } + proto = fields[0] + localAddress, localPort, err = parseIPPort(fields[1]) + if err != nil { + return nil, err + } + case fields[0] == "->": + if len(fields) < 6 { + continue + } + remoteAddress, remotePort, err := parseIPPort(fields[1]) + if err != nil { + return nil, err + } + weight, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + activeConn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + inactConn, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + status = append(status, IPVSBackendStatus{ + LocalAddress: localAddress, + LocalPort: localPort, + RemoteAddress: remoteAddress, + RemotePort: remotePort, + Proto: proto, + Weight: weight, + ActiveConn: activeConn, + InactConn: inactConn, + }) + } + } + return status, nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + tmp := strings.SplitN(s, ":", 2) + + if len(tmp) != 2 { + return nil, 0, fmt.Errorf("invalid IP:Port: %s", s) + } + + if len(tmp[0]) != 8 && len(tmp[0]) != 32 { + return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0]) + } + + ip, err := hex.DecodeString(tmp[0]) + if err != nil { + return nil, 0, err 
+ } + + port, err := strconv.ParseUint(tmp[1], 16, 16) + if err != nil { + return nil, 0, err + } + + return ip, uint16(port), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/mdstat.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/mdstat.go new file mode 100644 index 00000000..d7a248c0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/mdstat.go @@ -0,0 +1,138 @@ +package procfs + +import ( + "fmt" + "io/ioutil" + "regexp" + "strconv" + "strings" +) + +var ( + statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) + buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) +) + +// MDStat holds info parsed from /proc/mdstat. +type MDStat struct { + // Name of the device. + Name string + // activity-state of the device. + ActivityState string + // Number of active disks. + DisksActive int64 + // Total number of disks the device consists of. + DisksTotal int64 + // Number of blocks the device holds. + BlocksTotal int64 + // Number of blocks on the device that are in sync. + BlocksSynced int64 +} + +// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. +func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { + mdStatusFilePath := fs.Path("mdstat") + content, err := ioutil.ReadFile(mdStatusFilePath) + if err != nil { + return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + mdStates := []MDStat{} + lines := strings.Split(string(content), "\n") + for i, l := range lines { + if l == "" { + continue + } + if l[0] == ' ' { + continue + } + if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { + continue + } + + mainLine := strings.Split(l, " ") + if len(mainLine) < 3 { + return mdStates, fmt.Errorf("error parsing mdline: %s", l) + } + mdName := mainLine[0] + activityState := mainLine[2] + + if len(lines) <= i+3 { + return mdStates, fmt.Errorf( + "error parsing %s: too few lines for md device %s", + mdStatusFilePath, + mdName, + ) + } + + active, total, size, err := evalStatusline(lines[i+1]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + // j is the line number of the syncing-line. + j := i + 2 + if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + j = i + 3 + } + + // If device is syncing at the moment, get the number of currently + // synced bytes, otherwise that number equals the size of the device. 
+ syncedBlocks := size + if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { + syncedBlocks, err = evalBuildline(lines[j]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + } + + mdStates = append(mdStates, MDStat{ + Name: mdName, + ActivityState: activityState, + DisksActive: active, + DisksTotal: total, + BlocksTotal: size, + BlocksSynced: syncedBlocks, + }) + } + + return mdStates, nil +} + +func evalStatusline(statusline string) (active, total, size int64, err error) { + matches := statuslineRE.FindStringSubmatch(statusline) + if len(matches) != 4 { + return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) + } + + size, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + total, err = strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + active, err = strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + return active, total, size, nil +} + +func evalBuildline(buildline string) (syncedBlocks int64, err error) { + matches := buildlineRE.FindStringSubmatch(buildline) + if len(matches) != 2 { + return 0, fmt.Errorf("unexpected buildline: %s", buildline) + } + + syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("%s in buildline: %s", err, buildline) + } + + return syncedBlocks, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc.go new file mode 100644 index 00000000..0d0a6a90 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc.go @@ -0,0 +1,212 @@ +package procfs + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +// Proc provides information about a running process. +type Proc struct { + // The process ID. + PID int + + fs FS +} + +// Procs represents a list of Proc structs. +type Procs []Proc + +func (p Procs) Len() int { return len(p) } +func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } + +// Self returns a process for the current process read via /proc/self. +func Self() (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Self() +} + +// NewProc returns a process for the given pid under /proc. +func NewProc(pid int) (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// AllProcs returns a list of all currently available processes under /proc. +func AllProcs() (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllProcs() +} + +// Self returns a process for the current process. +func (fs FS) Self() (Proc, error) { + p, err := os.Readlink(fs.Path("self")) + if err != nil { + return Proc{}, err + } + pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// NewProc returns a process for the given pid. 
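ParseMDStat above reads /proc/mdstat device by device, pairing each device line with its status line (and skipping an optional bitmap line) to report how many disks and blocks are active and in sync. A short usage sketch under the same import-path assumption as before:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs" // import path taken from the package's doc example
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatalf("could not open procfs: %s", err)
	}

	devices, err := fs.ParseMDStat()
	if err != nil {
		log.Fatalf("could not parse mdstat: %s", err)
	}
	for _, md := range devices {
		fmt.Printf("%s (%s): %d/%d disks active, %d/%d blocks in sync\n",
			md.Name, md.ActivityState,
			md.DisksActive, md.DisksTotal,
			md.BlocksSynced, md.BlocksTotal)
	}
}
```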
+func (fs FS) NewProc(pid int) (Proc, error) { + if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { + return Proc{}, err + } + return Proc{PID: pid, fs: fs}, nil +} + +// AllProcs returns a list of all currently available processes. +func (fs FS) AllProcs() (Procs, error) { + d, err := os.Open(fs.Path()) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + p := Procs{} + for _, n := range names { + pid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + p = append(p, Proc{PID: int(pid), fs: fs}) + } + + return p, nil +} + +// CmdLine returns the command line of a process. +func (p Proc) CmdLine() ([]string, error) { + f, err := os.Open(p.path("cmdline")) + if err != nil { + return nil, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + if len(data) < 1 { + return []string{}, nil + } + + return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { + f, err := os.Open(p.path("comm")) + if err != nil { + return "", err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(data)), nil +} + +// Executable returns the absolute path of the executable command of a process. +func (p Proc) Executable() (string, error) { + exe, err := os.Readlink(p.path("exe")) + if os.IsNotExist(err) { + return "", nil + } + + return exe, err +} + +// FileDescriptors returns the currently open file descriptors of a process. +func (p Proc) FileDescriptors() ([]uintptr, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + fds := make([]uintptr, len(names)) + for i, n := range names { + fd, err := strconv.ParseInt(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + } + fds[i] = uintptr(fd) + } + + return fds, nil +} + +// FileDescriptorTargets returns the targets of all file descriptors of a process. +// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. +func (p Proc) FileDescriptorTargets() ([]string, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + targets := make([]string, len(names)) + + for i, name := range names { + target, err := os.Readlink(p.path("fd", name)) + if err == nil { + targets[i] = target + } + } + + return targets, nil +} + +// FileDescriptorsLen returns the number of currently open file descriptors of +// a process. +func (p Proc) FileDescriptorsLen() (int, error) { + fds, err := p.fileDescriptors() + if err != nil { + return 0, err + } + + return len(fds), nil +} + +func (p Proc) fileDescriptors() ([]string, error) { + d, err := os.Open(p.path("fd")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + return names, nil +} + +func (p Proc) path(pa ...string) string { + return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) 
+} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_io.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_io.go new file mode 100644 index 00000000..b4e31d7b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_io.go @@ -0,0 +1,55 @@ +package procfs + +import ( + "fmt" + "io/ioutil" + "os" +) + +// ProcIO models the content of /proc//io. +type ProcIO struct { + // Chars read. + RChar uint64 + // Chars written. + WChar uint64 + // Read syscalls. + SyscR uint64 + // Write syscalls. + SyscW uint64 + // Bytes read. + ReadBytes uint64 + // Bytes written. + WriteBytes uint64 + // Bytes written, but taking into account truncation. See + // Documentation/filesystems/proc.txt in the kernel sources for + // detailed explanation. + CancelledWriteBytes int64 +} + +// NewIO creates a new ProcIO instance from a given Proc instance. +func (p Proc) NewIO() (ProcIO, error) { + pio := ProcIO{} + + f, err := os.Open(p.path("io")) + if err != nil { + return pio, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return pio, err + } + + ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + + "read_bytes: %d\nwrite_bytes: %d\n" + + "cancelled_write_bytes: %d\n" + + _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, + &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) + if err != nil { + return pio, err + } + + return pio, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_limits.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_limits.go new file mode 100644 index 00000000..2df997ce --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_limits.go @@ -0,0 +1,137 @@ +package procfs + +import ( + "bufio" + "fmt" + "os" + "regexp" + "strconv" +) + +// ProcLimits represents the soft limits for each of the process's resource +// limits. For more information see getrlimit(2): +// http://man7.org/linux/man-pages/man2/getrlimit.2.html. +type ProcLimits struct { + // CPU time limit in seconds. + CPUTime int + // Maximum size of files that the process may create. + FileSize int + // Maximum size of the process's data segment (initialized data, + // uninitialized data, and heap). + DataSize int + // Maximum size of the process stack in bytes. + StackSize int + // Maximum size of a core file. + CoreFileSize int + // Limit of the process's resident set in pages. + ResidentSet int + // Maximum number of processes that can be created for the real user ID of + // the calling process. + Processes int + // Value one greater than the maximum file descriptor number that can be + // opened by this process. + OpenFiles int + // Maximum number of bytes of memory that may be locked into RAM. + LockedMemory int + // Maximum size of the process's virtual memory address space in bytes. + AddressSpace int + // Limit on the combined number of flock(2) locks and fcntl(2) leases that + // this process may establish. + FileLocks int + // Limit of signals that may be queued for the real user ID of the calling + // process. + PendingSignals int + // Limit on the number of bytes that can be allocated for POSIX message + // queues for the real user ID of the calling process. + MsqqueueSize int + // Limit of the nice priority set using setpriority(2) or nice(2). 
+ NicePriority int + // Limit of the real-time priority set using sched_setscheduler(2) or + // sched_setparam(2). + RealtimePriority int + // Limit (in microseconds) on the amount of CPU time that a process + // scheduled under a real-time scheduling policy may consume without making + // a blocking system call. + RealtimeTimeout int +} + +const ( + limitsFields = 3 + limitsUnlimited = "unlimited" +) + +var ( + limitsDelimiter = regexp.MustCompile(" +") +) + +// NewLimits returns the current soft limits of the process. +func (p Proc) NewLimits() (ProcLimits, error) { + f, err := os.Open(p.path("limits")) + if err != nil { + return ProcLimits{}, err + } + defer f.Close() + + var ( + l = ProcLimits{} + s = bufio.NewScanner(f) + ) + for s.Scan() { + fields := limitsDelimiter.Split(s.Text(), limitsFields) + if len(fields) != limitsFields { + return ProcLimits{}, fmt.Errorf( + "couldn't parse %s line %s", f.Name(), s.Text()) + } + + switch fields[0] { + case "Max cpu time": + l.CPUTime, err = parseInt(fields[1]) + case "Max file size": + l.FileSize, err = parseInt(fields[1]) + case "Max data size": + l.DataSize, err = parseInt(fields[1]) + case "Max stack size": + l.StackSize, err = parseInt(fields[1]) + case "Max core file size": + l.CoreFileSize, err = parseInt(fields[1]) + case "Max resident set": + l.ResidentSet, err = parseInt(fields[1]) + case "Max processes": + l.Processes, err = parseInt(fields[1]) + case "Max open files": + l.OpenFiles, err = parseInt(fields[1]) + case "Max locked memory": + l.LockedMemory, err = parseInt(fields[1]) + case "Max address space": + l.AddressSpace, err = parseInt(fields[1]) + case "Max file locks": + l.FileLocks, err = parseInt(fields[1]) + case "Max pending signals": + l.PendingSignals, err = parseInt(fields[1]) + case "Max msgqueue size": + l.MsqqueueSize, err = parseInt(fields[1]) + case "Max nice priority": + l.NicePriority, err = parseInt(fields[1]) + case "Max realtime priority": + l.RealtimePriority, err = parseInt(fields[1]) + case "Max realtime timeout": + l.RealtimeTimeout, err = parseInt(fields[1]) + } + if err != nil { + return ProcLimits{}, err + } + } + + return l, s.Err() +} + +func parseInt(s string) (int, error) { + if s == limitsUnlimited { + return -1, nil + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) + } + return int(i), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_stat.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_stat.go new file mode 100644 index 00000000..724e271b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_stat.go @@ -0,0 +1,175 @@ +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" +) + +// Originally, this USER_HZ value was dynamically retrieved via a sysconf call +// which required cgo. However, that caused a lot of problems regarding +// cross-compilation. Alternatives such as running a binary to determine the +// value, or trying to derive it in some other way were all problematic. After +// much research it was determined that USER_HZ is actually hardcoded to 100 on +// all Go-supported platforms as of the time of this writing. This is why we +// decided to hardcode it here as well. It is not impossible that there could +// be systems with exceptions, but they should be very exotic edge cases, and +// in that case, the worst outcome will be two misreported metrics. 
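NewLimits above parses /proc/<pid>/limits into the ProcLimits struct, with parseInt reporting "unlimited" values as -1. A hedged sketch that compares a process's open descriptors against its soft limit, again using the import path from the package's doc example:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs" // import path taken from the package's doc example
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatalf("could not get process: %s", err)
	}

	limits, err := p.NewLimits()
	if err != nil {
		log.Fatalf("could not read limits: %s", err)
	}
	open, err := p.FileDescriptorsLen()
	if err != nil {
		log.Fatalf("could not count file descriptors: %s", err)
	}

	// OpenFiles is -1 when the soft limit is "unlimited".
	fmt.Printf("open file descriptors: %d (soft limit %d)\n", open, limits.OpenFiles)
}
```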
+// +// See also the following discussions: +// +// - https://github.com/prometheus/node_exporter/issues/52 +// - https://github.com/prometheus/procfs/pull/2 +// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue +const userHZ = 100 + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStat struct { + // The process ID. + PID int + // The filename of the executable. + Comm string + // The process state. + State string + // The PID of the parent of this process. + PPID int + // The process group ID of the process. + PGRP int + // The session ID of the process. + Session int + // The controlling terminal of the process. + TTY int + // The ID of the foreground process group of the controlling terminal of + // the process. + TPGID int + // The kernel flags word of the process. + Flags uint + // The number of minor faults the process has made which have not required + // loading a memory page from disk. + MinFlt uint + // The number of minor faults that the process's waited-for children have + // made. + CMinFlt uint + // The number of major faults the process has made which have required + // loading a memory page from disk. + MajFlt uint + // The number of major faults that the process's waited-for children have + // made. + CMajFlt uint + // Amount of time that this process has been scheduled in user mode, + // measured in clock ticks. + UTime uint + // Amount of time that this process has been scheduled in kernel mode, + // measured in clock ticks. + STime uint + // Amount of time that this process's waited-for children have been + // scheduled in user mode, measured in clock ticks. + CUTime uint + // Amount of time that this process's waited-for children have been + // scheduled in kernel mode, measured in clock ticks. + CSTime uint + // For processes running a real-time scheduling policy, this is the negated + // scheduling priority, minus one. + Priority int + // The nice value, a value in the range 19 (low priority) to -20 (high + // priority). + Nice int + // Number of threads in this process. + NumThreads int + // The time the process started after system boot, the value is expressed + // in clock ticks. + Starttime uint64 + // Virtual memory size in bytes. + VSize int + // Resident set size in pages. + RSS int + + fs FS +} + +// NewStat returns the current status information of the process. +func (p Proc) NewStat() (ProcStat, error) { + f, err := os.Open(p.path("stat")) + if err != nil { + return ProcStat{}, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return ProcStat{}, err + } + + var ( + ignore int + + s = ProcStat{PID: p.PID, fs: p.fs} + l = bytes.Index(data, []byte("(")) + r = bytes.LastIndex(data, []byte(")")) + ) + + if l < 0 || r < 0 { + return ProcStat{}, fmt.Errorf( + "unexpected format, couldn't extract comm: %s", + data, + ) + } + + s.Comm = string(data[l+1 : r]) + _, err = fmt.Fscan( + bytes.NewBuffer(data[r+2:]), + &s.State, + &s.PPID, + &s.PGRP, + &s.Session, + &s.TTY, + &s.TPGID, + &s.Flags, + &s.MinFlt, + &s.CMinFlt, + &s.MajFlt, + &s.CMajFlt, + &s.UTime, + &s.STime, + &s.CUTime, + &s.CSTime, + &s.Priority, + &s.Nice, + &s.NumThreads, + &ignore, + &s.Starttime, + &s.VSize, + &s.RSS, + ) + if err != nil { + return ProcStat{}, err + } + + return s, nil +} + +// VirtualMemory returns the virtual memory size in bytes. +func (s ProcStat) VirtualMemory() int { + return s.VSize +} + +// ResidentMemory returns the resident memory size in bytes. 
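NewStat above isolates the command name between the outermost parentheses and scans the remaining space-separated fields; the accessors that follow convert raw ticks and pages into seconds and bytes. A hedged usage sketch, with the PID chosen purely for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs" // import path taken from the package's doc example
)

func main() {
	p, err := procfs.NewProc(1) // PID 1 is illustrative only
	if err != nil {
		log.Fatalf("could not get process: %s", err)
	}

	stat, err := p.NewStat()
	if err != nil {
		log.Fatalf("could not read stat: %s", err)
	}

	fmt.Printf("%s: state=%s threads=%d\n", stat.Comm, stat.State, stat.NumThreads)
	fmt.Printf("cpu time: %.2fs\n", stat.CPUTime())                  // (UTime+STime)/USER_HZ
	fmt.Printf("resident memory: %d bytes\n", stat.ResidentMemory()) // RSS pages * page size
}
```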
+func (s ProcStat) ResidentMemory() int { + return s.RSS * os.Getpagesize() +} + +// StartTime returns the unix timestamp of the process in seconds. +func (s ProcStat) StartTime() (float64, error) { + stat, err := s.fs.NewStat() + if err != nil { + return 0, err + } + return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil +} + +// CPUTime returns the total CPU user and system time in seconds. +func (s ProcStat) CPUTime() float64 { + return float64(s.UTime+s.STime) / userHZ +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/stat.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/stat.go new file mode 100644 index 00000000..1ca217e8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/stat.go @@ -0,0 +1,56 @@ +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// Stat represents kernel/system statistics. +type Stat struct { + // Boot time in seconds since the Epoch. + BootTime int64 +} + +// NewStat returns kernel/system statistics read from /proc/stat. +func NewStat() (Stat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Stat{}, err + } + + return fs.NewStat() +} + +// NewStat returns an information about current kernel/system statistics. +func (fs FS) NewStat() (Stat, error) { + f, err := os.Open(fs.Path("stat")) + if err != nil { + return Stat{}, err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + line := s.Text() + if !strings.HasPrefix(line, "btime") { + continue + } + fields := strings.Fields(line) + if len(fields) != 2 { + return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line) + } + i, err := strconv.ParseInt(fields[1], 10, 32) + if err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err) + } + return Stat{BootTime: i}, nil + } + if err := s.Err(); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) + } + + return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name()) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/LICENSE new file mode 100644 index 00000000..bd899d83 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2015, Pierre Curto +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of xxHash nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/README.md new file mode 100644 index 00000000..dd3c9d47 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/README.md @@ -0,0 +1,31 @@ +[![godoc](https://godoc.org/github.com/pierrec/lz4?status.png)](https://godoc.org/github.com/pierrec/lz4) +[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4) + +# lz4 +LZ4 compression and decompression in pure Go + +## Usage + +```go +import "github.com/pierrec/lz4" +``` + +## Description + +Package lz4 implements reading and writing lz4 compressed data (a frame), +as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html, +using an io.Reader (decompression) and io.Writer (compression). +It is designed to minimize memory usage while maximizing throughput by being able to +[de]compress data concurrently. + +The Reader and the Writer support concurrent processing provided the supplied buffers are +large enough (in multiples of BlockMaxSize) and there is no block dependency. +Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently. +The runtime.GOMAXPROCS() value is used to apply concurrency or not. + +Although the block level compression and decompression functions are exposed and are fully compatible +with the lz4 block format definition, they are low level and should not be used directly. +For a complete description of an lz4 compressed block, see: +http://fastcompression.blogspot.fr/2011/05/lz4-explained.html + +See https://github.com/Cyan4973/lz4 for the reference C implementation. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/block.go new file mode 100644 index 00000000..6884bccd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/block.go @@ -0,0 +1,474 @@ +package lz4 + +import ( + "encoding/binary" + "errors" + "unsafe" +) + +// block represents a frame data block. +// Used when compressing or decompressing frame blocks concurrently. +type block struct { + compressed bool + zdata []byte // compressed data + data []byte // decompressed data + offset int // offset within the data as with block dependency the 64Kb window is prepended to it + checksum uint32 // compressed data checksum + err error // error while [de]compressing +} + +var ( + // ErrInvalidSource is returned by UncompressBlock when a compressed block is corrupted. + ErrInvalidSource = errors.New("lz4: invalid source") + // ErrShortBuffer is returned by UncompressBlock, CompressBlock or CompressBlockHC when + // the supplied buffer for [de]compression is too small. + ErrShortBuffer = errors.New("lz4: short buffer") +) + +// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. 
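The README above describes the frame-level API: an io.Writer that produces an lz4 frame and an io.Reader that consumes one, with optional concurrency. A hedged round-trip sketch; it assumes the package's NewWriter and NewReader constructors, which belong to the frame API but are not part of the hunks shown here:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/pierrec/lz4"
)

func main() {
	payload := bytes.Repeat([]byte("hello lz4 "), 200)

	// Compress into an in-memory frame. NewWriter is assumed from the frame API.
	var frame bytes.Buffer
	zw := lz4.NewWriter(&frame)
	if _, err := zw.Write(payload); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Decompress and verify. NewReader is assumed from the frame API.
	got, err := ioutil.ReadAll(lz4.NewReader(&frame))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("round trip ok:", bytes.Equal(payload, got))
}
```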
+func CompressBlockBound(n int) int { + return n + n/255 + 16 +} + +// UncompressBlock decompresses the source buffer into the destination one, +// starting at the di index and returning the decompressed size. +// +// The destination buffer must be sized appropriately. +// +// An error is returned if the source data is invalid or the destination buffer is too small. +func UncompressBlock(src, dst []byte, di int) (int, error) { + si, sn, di0 := 0, len(src), di + if sn == 0 { + return 0, nil + } + + for { + // literals and match lengths (token) + lLen := int(src[si] >> 4) + mLen := int(src[si] & 0xF) + if si++; si == sn { + return di, ErrInvalidSource + } + + // literals + if lLen > 0 { + if lLen == 0xF { + for src[si] == 0xFF { + lLen += 0xFF + if si++; si == sn { + return di - di0, ErrInvalidSource + } + } + lLen += int(src[si]) + if si++; si == sn { + return di - di0, ErrInvalidSource + } + } + if len(dst)-di < lLen || si+lLen > sn { + return di - di0, ErrShortBuffer + } + di += copy(dst[di:], src[si:si+lLen]) + + if si += lLen; si >= sn { + return di - di0, nil + } + } + + if si += 2; si >= sn { + return di, ErrInvalidSource + } + offset := int(src[si-2]) | int(src[si-1])<<8 + if di-offset < 0 || offset == 0 { + return di - di0, ErrInvalidSource + } + + // match + if mLen == 0xF { + for src[si] == 0xFF { + mLen += 0xFF + if si++; si == sn { + return di - di0, ErrInvalidSource + } + } + mLen += int(src[si]) + if si++; si == sn { + return di - di0, ErrInvalidSource + } + } + // minimum match length is 4 + mLen += 4 + if len(dst)-di <= mLen { + return di - di0, ErrShortBuffer + } + + // copy the match (NB. match is at least 4 bytes long) + // NB. past di, copy() would write old bytes instead of + // the ones we just copied, so split the work into the largest chunk. + for ; mLen >= offset; mLen -= offset { + di += copy(dst[di:], dst[di-offset:di]) + } + di += copy(dst[di:], dst[di-offset:di-offset+mLen]) + } +} + +type hashEntry struct { + generation uint + value int +} + +// CompressBlock compresses the source buffer starting at soffet into the destination one. +// This is the fast version of LZ4 compression and also the default one. +// +// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible. +// +// An error is returned if the destination buffer is too small. +func CompressBlock(src, dst []byte, soffset int) (int, error) { + var hashTable [hashTableSize]hashEntry + return compressGenerationalBlock(src, dst, soffset, 0, hashTable[:]) +} + +// getUint32 is a despicably evil function (well, for Go!) that takes advantage +// of the machine's byte order to save some operations. This may look +// inefficient but it is significantly faster on littleEndian machines, +// which include x84, amd64, and some ARM processors. 
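The block-level contract above is simple even though the README flags these functions as low level: size the destination with CompressBlockBound, treat a zero result from CompressBlock as incompressible input, and give UncompressBlock a destination large enough for the original data, whose length the block format itself does not record. A short sketch of a block round trip:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/pierrec/lz4"
)

func main() {
	data := bytes.Repeat([]byte("abcd1234"), 512)

	// Worst-case output size for input that does not compress at all.
	zbuf := make([]byte, lz4.CompressBlockBound(len(data)))
	zn, err := lz4.CompressBlock(data, zbuf, 0)
	if err != nil {
		log.Fatal(err)
	}
	if zn == 0 {
		log.Fatal("input was not compressible")
	}
	zbuf = zbuf[:zn]

	// The caller must know (an upper bound on) the decompressed size.
	out := make([]byte, len(data))
	n, err := lz4.UncompressBlock(zbuf, out, 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("round trip ok:", bytes.Equal(data, out[:n]))
}
```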
+func getUint32(b []byte) uint32 { + _ = b[3] + if isLittleEndian { + return *(*uint32)(unsafe.Pointer(&b)) + } + + return uint32(b[0]) | + uint32(b[1])<<8 | + uint32(b[2])<<16 | + uint32(b[3])<<24 +} + +func compressGenerationalBlock(src, dst []byte, soffset int, generation uint, hashTable []hashEntry) (int, error) { + sn, dn := len(src)-mfLimit, len(dst) + if sn <= 0 || dn == 0 || soffset >= sn { + return 0, nil + } + var si, di int + + // fast scan strategy: + // we only need a hash table to store the last sequences (4 bytes) + var hashShift = uint((minMatch * 8) - hashLog) + + // Initialise the hash table with the first 64Kb of the input buffer + // (used when compressing dependent blocks) + for si < soffset { + h := getUint32(src[si:]) * hasher >> hashShift + si++ + hashTable[h] = hashEntry{generation, si} + } + + anchor := si + fma := 1 << skipStrength + for si < sn-minMatch { + // hash the next 4 bytes (sequence)... + h := getUint32(src[si:]) * hasher >> hashShift + if hashTable[h].generation != generation { + hashTable[h] = hashEntry{generation, 0} + } + // -1 to separate existing entries from new ones + ref := hashTable[h].value - 1 + // ...and store the position of the hash in the hash table (+1 to compensate the -1 upon saving) + hashTable[h].value = si + 1 + // no need to check the last 3 bytes in the first literal 4 bytes as + // this guarantees that the next match, if any, is compressed with + // a lower size, since to have some compression we must have: + // ll+ml-overlap > 1 + (ll-15)/255 + (ml-4-15)/255 + 2 (uncompressed size>compressed size) + // => ll+ml>3+2*overlap => ll+ml>= 4+2*overlap + // and by definition we do have: + // ll >= 1, ml >= 4 + // => ll+ml >= 5 + // => so overlap must be 0 + + // the sequence is new, out of bound (64kb) or not valid: try next sequence + if ref < 0 || fma&(1<>winSizeLog > 0 || + src[ref] != src[si] || + src[ref+1] != src[si+1] || + src[ref+2] != src[si+2] || + src[ref+3] != src[si+3] { + // variable step: improves performance on non-compressible data + si += fma >> skipStrength + fma++ + continue + } + // match found + fma = 1 << skipStrength + lLen := si - anchor + offset := si - ref + + // encode match length part 1 + si += minMatch + mLen := si // match length has minMatch already + for si <= sn && src[si] == src[si-offset] { + si++ + } + mLen = si - mLen + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // encode literals length + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + if di++; di == dn { + return di, ErrShortBuffer + } + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(l) + } + if di++; di == dn { + return di, ErrShortBuffer + } + + // literals + if di+lLen >= dn { + return di, ErrShortBuffer + } + di += copy(dst[di:], src[anchor:anchor+lLen]) + anchor = si + + // encode offset + if di += 2; di >= dn { + return di, ErrShortBuffer + } + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // encode match length part 2 + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(mLen) + if di++; di == dn { + return di, ErrShortBuffer + } + } + } + + if anchor == 0 { + // incompressible + return 0, nil + } + + // last literals + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + if di++; di == dn { + return di, ErrShortBuffer + } + 
lLen -= 0xF + for ; lLen >= 0xFF; lLen -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(lLen) + } + if di++; di == dn { + return di, ErrShortBuffer + } + + // write literals + src = src[anchor:] + switch n := di + len(src); { + case n > dn: + return di, ErrShortBuffer + case n >= sn: + // incompressible + return 0, nil + } + di += copy(dst[di:], src) + return di, nil +} + +// CompressBlockHC compresses the source buffer starting at soffet into the destination one. +// CompressBlockHC compression ratio is better than CompressBlock but it is also slower. +// +// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible. +// +// An error is returned if the destination buffer is too small. +func CompressBlockHC(src, dst []byte, soffset int) (int, error) { + sn, dn := len(src)-mfLimit, len(dst) + if sn <= 0 || dn == 0 || soffset >= sn { + return 0, nil + } + var si, di int + + // Hash Chain strategy: + // we need a hash table and a chain table + // the chain table cannot contain more entries than the window size (64Kb entries) + var hashTable [1 << hashLog]int + var chainTable [winSize]int + var hashShift = uint((minMatch * 8) - hashLog) + + // Initialise the hash table with the first 64Kb of the input buffer + // (used when compressing dependent blocks) + for si < soffset { + h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + chainTable[si&winMask] = hashTable[h] + si++ + hashTable[h] = si + } + + anchor := si + for si < sn-minMatch { + // hash the next 4 bytes (sequence)... + h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + + // follow the chain until out of window and give the longest match + mLen := 0 + offset := 0 + for next := hashTable[h] - 1; next > 0 && next > si-winSize; next = chainTable[next&winMask] - 1 { + // the first (mLen==0) or next byte (mLen>=minMatch) at current match length must match to improve on the match length + if src[next+mLen] == src[si+mLen] { + for ml := 0; ; ml++ { + if src[next+ml] != src[si+ml] || si+ml > sn { + // found a longer match, keep its position and length + if mLen < ml && ml >= minMatch { + mLen = ml + offset = si - next + } + break + } + } + } + } + chainTable[si&winMask] = hashTable[h] + hashTable[h] = si + 1 + + // no match found + if mLen == 0 { + si++ + continue + } + + // match found + // update hash/chain tables with overlaping bytes: + // si already hashed, add everything from si+1 up to the match length + for si, ml := si+1, si+mLen; si < ml; { + h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + chainTable[si&winMask] = hashTable[h] + si++ + hashTable[h] = si + } + + lLen := si - anchor + si += mLen + mLen -= minMatch // match length does not include minMatch + + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // encode literals length + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + if di++; di == dn { + return di, ErrShortBuffer + } + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(l) + } + if di++; di == dn { + return di, ErrShortBuffer + } + + // literals + if di+lLen >= dn { + return di, ErrShortBuffer + } + di += copy(dst[di:], src[anchor:anchor+lLen]) + anchor = si + + // encode offset + if di += 2; di >= dn { + return di, ErrShortBuffer + } + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // encode match length part 2 + if mLen >= 0xF 
{ + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(mLen) + if di++; di == dn { + return di, ErrShortBuffer + } + } + } + + if anchor == 0 { + // incompressible + return 0, nil + } + + // last literals + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + if di++; di == dn { + return di, ErrShortBuffer + } + lLen -= 0xF + for ; lLen >= 0xFF; lLen -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(lLen) + } + if di++; di == dn { + return di, ErrShortBuffer + } + + // write literals + src = src[anchor:] + switch n := di + len(src); { + case n > dn: + return di, ErrShortBuffer + case n >= sn: + // incompressible + return 0, nil + } + di += copy(dst[di:], src) + return di, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/lz4.go new file mode 100644 index 00000000..46389243 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/lz4.go @@ -0,0 +1,118 @@ +// Package lz4 implements reading and writing lz4 compressed data (a frame), +// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html, +// using an io.Reader (decompression) and io.Writer (compression). +// It is designed to minimize memory usage while maximizing throughput by being able to +// [de]compress data concurrently. +// +// The Reader and the Writer support concurrent processing provided the supplied buffers are +// large enough (in multiples of BlockMaxSize) and there is no block dependency. +// Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently. +// The runtime.GOMAXPROCS() value is used to apply concurrency or not. +// +// Although the block level compression and decompression functions are exposed and are fully compatible +// with the lz4 block format definition, they are low level and should not be used directly. +// For a complete description of an lz4 compressed block, see: +// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html +// +// See https://github.com/Cyan4973/lz4 for the reference C implementation. +package lz4 + +import ( + "hash" + "sync" + "unsafe" + + "github.com/pierrec/xxHash/xxHash32" +) + +const ( + // Extension is the LZ4 frame file name extension + Extension = ".lz4" + // Version is the LZ4 frame format version + Version = 1 + + frameMagic = uint32(0x184D2204) + frameSkipMagic = uint32(0x184D2A50) + + // The following constants are used to setup the compression algorithm. + minMatch = 4 // the minimum size of the match sequence size (4 bytes) + winSizeLog = 16 // LZ4 64Kb window size limit + winSize = 1 << winSizeLog + winMask = winSize - 1 // 64Kb window of previous data for dependent blocks + + // hashLog determines the size of the hash table used to quickly find a previous match position. + // Its value influences the compression speed and memory usage, the lower the faster, + // but at the expense of the compression ratio. + // 16 seems to be the best compromise. + hashLog = 16 + hashTableSize = 1 << hashLog + hashShift = uint((minMatch * 8) - hashLog) + + mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes. 
+ skipStrength = 6 // variable step for fast scan + + hasher = uint32(2654435761) // prime number used to hash minMatch +) + +// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb. +var bsMapID = map[byte]int{4: 64 << 10, 5: 256 << 10, 6: 1 << 20, 7: 4 << 20} +var bsMapValue = map[int]byte{} + +// Reversed. +func init() { + for i, v := range bsMapID { + bsMapValue[v] = i + } +} + +var isLittleEndian = getIsLittleEndian() + +func getIsLittleEndian() (ret bool) { + var i int = 0x1 + bs := (*[1]byte)(unsafe.Pointer(&i)) + if bs[0] == 0 { + return false + } + + return true +} + +// Header describes the various flags that can be set on a Writer or obtained from a Reader. +// The default values match those of the LZ4 frame format definition (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html). +// +// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls. +// It is the caller responsibility to check them if necessary (typically when using the Reader concurrency). +type Header struct { + BlockDependency bool // compressed blocks are dependent (one block depends on the last 64Kb of the previous one) + BlockChecksum bool // compressed blocks are checksumed + NoChecksum bool // frame checksum + BlockMaxSize int // the size of the decompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB. + Size uint64 // the frame total size. It is _not_ computed by the Writer. + HighCompression bool // use high compression (only for the Writer) + done bool // whether the descriptor was processed (Read or Write and checked) + // Removed as not supported + // Dict bool // a dictionary id is to be used + // DictID uint32 // the dictionary id read from the frame, if any. +} + +// xxhPool wraps the standard pool for xxHash items. +// Putting items back in the pool automatically resets them. +type xxhPool struct { + sync.Pool +} + +func (p *xxhPool) Get() hash.Hash32 { + return p.Pool.Get().(hash.Hash32) +} + +func (p *xxhPool) Put(h hash.Hash32) { + h.Reset() + p.Pool.Put(h) +} + +// hashPool is used by readers and writers and contains xxHash items. +var hashPool = xxhPool{ + Pool: sync.Pool{ + New: func() interface{} { return xxHash32.New(0) }, + }, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/lz4c/main.go b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/lz4c/main.go new file mode 100644 index 00000000..048ab500 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/lz4c/main.go @@ -0,0 +1,108 @@ +// Command line utility for the lz4 package. 
+package main + +import ( + // "bytes" + + "flag" + "fmt" + "io" + "log" + "os" + "path" + "runtime" + "strings" + + "github.com/pierrec/lz4" +) + +func main() { + // Process command line arguments + var ( + blockMaxSizeDefault = 4 << 20 + flagStdout = flag.Bool("c", false, "output to stdout") + flagDecompress = flag.Bool("d", false, "decompress flag") + flagBlockMaxSize = flag.Int("B", blockMaxSizeDefault, "block max size [64Kb,256Kb,1Mb,4Mb]") + flagBlockDependency = flag.Bool("BD", false, "enable block dependency") + flagBlockChecksum = flag.Bool("BX", false, "enable block checksum") + flagStreamChecksum = flag.Bool("Sx", false, "disable stream checksum") + flagHighCompression = flag.Bool("9", false, "enabled high compression") + ) + flag.Usage = func() { + fmt.Fprintf(os.Stderr, "Usage:\n\t%s [arg] [input]...\n\tNo input means [de]compress stdin to stdout\n\n", os.Args[0]) + flag.PrintDefaults() + } + flag.Parse() + + // Use all CPUs + runtime.GOMAXPROCS(runtime.NumCPU()) + + zr := lz4.NewReader(nil) + zw := lz4.NewWriter(nil) + zh := lz4.Header{ + BlockDependency: *flagBlockDependency, + BlockChecksum: *flagBlockChecksum, + BlockMaxSize: *flagBlockMaxSize, + NoChecksum: *flagStreamChecksum, + HighCompression: *flagHighCompression, + } + + worker := func(in io.Reader, out io.Writer) { + if *flagDecompress { + zr.Reset(in) + if _, err := io.Copy(out, zr); err != nil { + log.Fatalf("Error while decompressing input: %v", err) + } + } else { + zw.Reset(out) + zw.Header = zh + if _, err := io.Copy(zw, in); err != nil { + log.Fatalf("Error while compressing input: %v", err) + } + if err := zw.Close(); err != nil { + log.Fatalf("Error while closing stream: %v", err) + } + } + } + + // No input means [de]compress stdin to stdout + if len(flag.Args()) == 0 { + worker(os.Stdin, os.Stdout) + os.Exit(0) + } + + // Compress or decompress all input files + for _, inputFileName := range flag.Args() { + outputFileName := path.Clean(inputFileName) + + if !*flagStdout { + if *flagDecompress { + outputFileName = strings.TrimSuffix(outputFileName, lz4.Extension) + if outputFileName == inputFileName { + log.Fatalf("Invalid output file name: same as input: %s", inputFileName) + } + } else { + outputFileName += lz4.Extension + } + } + + inputFile, err := os.Open(inputFileName) + if err != nil { + log.Fatalf("Error while opening input: %v", err) + } + + outputFile := os.Stdout + if !*flagStdout { + outputFile, err = os.Create(outputFileName) + if err != nil { + log.Fatalf("Error while opening output: %v", err) + } + } + worker(inputFile, outputFile) + + inputFile.Close() + if !*flagStdout { + outputFile.Close() + } + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/reader.go new file mode 100644 index 00000000..9f7fd604 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/reader.go @@ -0,0 +1,364 @@ +package lz4 + +import ( + "encoding/binary" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + "runtime" + "sync" + "sync/atomic" +) + +// ErrInvalid is returned when the data being read is not an LZ4 archive +// (LZ4 magic number detection failed). +var ErrInvalid = errors.New("invalid lz4 data") + +// errEndOfBlock is returned by readBlock when it has reached the last block of the frame. +// It is not an error. +var errEndOfBlock = errors.New("end of block") + +// Reader implements the LZ4 frame decoder. +// The Header is set after the first call to Read(). 
+// The Header may change between Read() calls in case of concatenated frames. +type Reader struct { + Pos int64 // position within the source + Header + src io.Reader + checksum hash.Hash32 // frame hash + wg sync.WaitGroup // decompressing go routine wait group + data []byte // buffered decompressed data + window []byte // 64Kb decompressed data window +} + +// NewReader returns a new LZ4 frame decoder. +// No access to the underlying io.Reader is performed. +func NewReader(src io.Reader) *Reader { + return &Reader{ + src: src, + checksum: hashPool.Get(), + } +} + +// readHeader checks the frame magic number and parses the frame descriptoz. +// Skippable frames are supported even as a first frame although the LZ4 +// specifications recommends skippable frames not to be used as first frames. +func (z *Reader) readHeader(first bool) error { + defer z.checksum.Reset() + + for { + var magic uint32 + if err := binary.Read(z.src, binary.LittleEndian, &magic); err != nil { + if !first && err == io.ErrUnexpectedEOF { + return io.EOF + } + return err + } + z.Pos += 4 + if magic>>8 == frameSkipMagic>>8 { + var skipSize uint32 + if err := binary.Read(z.src, binary.LittleEndian, &skipSize); err != nil { + return err + } + z.Pos += 4 + m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize)) + z.Pos += m + if err != nil { + return err + } + continue + } + if magic != frameMagic { + return ErrInvalid + } + break + } + + // header + var buf [8]byte + if _, err := io.ReadFull(z.src, buf[:2]); err != nil { + return err + } + z.Pos += 2 + + b := buf[0] + if b>>6 != Version { + return fmt.Errorf("lz4.Read: invalid version: got %d expected %d", b>>6, Version) + } + z.BlockDependency = b>>5&1 == 0 + z.BlockChecksum = b>>4&1 > 0 + frameSize := b>>3&1 > 0 + z.NoChecksum = b>>2&1 == 0 + // z.Dict = b&1 > 0 + + bmsID := buf[1] >> 4 & 0x7 + bSize, ok := bsMapID[bmsID] + if !ok { + return fmt.Errorf("lz4.Read: invalid block max size: %d", bmsID) + } + z.BlockMaxSize = bSize + + z.checksum.Write(buf[0:2]) + + if frameSize { + if err := binary.Read(z.src, binary.LittleEndian, &z.Size); err != nil { + return err + } + z.Pos += 8 + binary.LittleEndian.PutUint64(buf[:], z.Size) + z.checksum.Write(buf[0:8]) + } + + // if z.Dict { + // if err := binary.Read(z.src, binary.LittleEndian, &z.DictID); err != nil { + // return err + // } + // z.Pos += 4 + // binary.LittleEndian.PutUint32(buf[:], z.DictID) + // z.checksum.Write(buf[0:4]) + // } + + // header checksum + if _, err := io.ReadFull(z.src, buf[:1]); err != nil { + return err + } + z.Pos++ + if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] { + return fmt.Errorf("lz4.Read: invalid header checksum: got %v expected %v", buf[0], h) + } + + z.Header.done = true + + return nil +} + +// Read decompresses data from the underlying source into the supplied buffer. +// +// Since there can be multiple streams concatenated, Header values may +// change between calls to Read(). If that is the case, no data is actually read from +// the underlying io.Reader, to allow for potential input buffer resizing. +// +// Data is buffered if the input buffer is too small, and exhausted upon successive calls. +// +// If the buffer is large enough (typically in multiples of BlockMaxSize) and there is +// no block dependency, then the data will be decompressed concurrently based on the GOMAXPROCS value. 
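Usage note for the Reader defined in this file: a minimal sketch of decompressing an LZ4 frame from a file to stdout. The file name `data.lz4` and the `main` wrapper are illustrative only; the `github.com/pierrec/lz4` import path and the `NewReader`/`Read`/`WriteTo` API come from this vendored package.

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/pierrec/lz4"
)

func main() {
	// data.lz4 is a placeholder input; any LZ4 frame works.
	f, err := os.Open("data.lz4")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	zr := lz4.NewReader(f)
	// io.Copy picks up Reader.WriteTo, which sizes its buffer from the
	// frame header and decompresses blocks concurrently when there is
	// no block dependency.
	if _, err := io.Copy(os.Stdout, zr); err != nil {
		log.Fatal(err)
	}
}
```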
+func (z *Reader) Read(buf []byte) (n int, err error) { + if !z.Header.done { + if err = z.readHeader(true); err != nil { + return + } + } + + if len(buf) == 0 { + return + } + + // exhaust remaining data from previous Read() + if len(z.data) > 0 { + n = copy(buf, z.data) + z.data = z.data[n:] + if len(z.data) == 0 { + z.data = nil + } + return + } + + // Break up the input buffer into BlockMaxSize blocks with at least one block. + // Then decompress into each of them concurrently if possible (no dependency). + // In case of dependency, the first block will be missing the window (except on the + // very first call), the rest will have it already since it comes from the previous block. + wbuf := buf + zn := (len(wbuf) + z.BlockMaxSize - 1) / z.BlockMaxSize + zblocks := make([]block, zn) + for zi, abort := 0, uint32(0); zi < zn && atomic.LoadUint32(&abort) == 0; zi++ { + zb := &zblocks[zi] + // last block may be too small + if len(wbuf) < z.BlockMaxSize+len(z.window) { + wbuf = make([]byte, z.BlockMaxSize+len(z.window)) + } + copy(wbuf, z.window) + if zb.err = z.readBlock(wbuf, zb); zb.err != nil { + break + } + wbuf = wbuf[z.BlockMaxSize:] + if !z.BlockDependency { + z.wg.Add(1) + go z.decompressBlock(zb, &abort) + continue + } + // cannot decompress concurrently when dealing with block dependency + z.decompressBlock(zb, nil) + // the last block may not contain enough data + if len(z.window) == 0 { + z.window = make([]byte, winSize) + } + if len(zb.data) >= winSize { + copy(z.window, zb.data[len(zb.data)-winSize:]) + } else { + copy(z.window, z.window[len(zb.data):]) + copy(z.window[len(zb.data)+1:], zb.data) + } + } + z.wg.Wait() + + // since a block size may be less then BlockMaxSize, trim the decompressed buffers + for _, zb := range zblocks { + if zb.err != nil { + if zb.err == errEndOfBlock { + return n, z.close() + } + return n, zb.err + } + bLen := len(zb.data) + if !z.NoChecksum { + z.checksum.Write(zb.data) + } + m := copy(buf[n:], zb.data) + // buffer the remaining data (this is necessarily the last block) + if m < bLen { + z.data = zb.data[m:] + } + n += m + } + + return +} + +// readBlock reads an entire frame block from the frame. +// The input buffer is the one that will receive the decompressed data. +// If the end of the frame is detected, it returns the errEndOfBlock error. +func (z *Reader) readBlock(buf []byte, b *block) error { + var bLen uint32 + if err := binary.Read(z.src, binary.LittleEndian, &bLen); err != nil { + return err + } + atomic.AddInt64(&z.Pos, 4) + + switch { + case bLen == 0: + return errEndOfBlock + case bLen&(1<<31) == 0: + b.compressed = true + b.data = buf + b.zdata = make([]byte, bLen) + default: + bLen = bLen & (1<<31 - 1) + if int(bLen) > len(buf) { + return fmt.Errorf("lz4.Read: invalid block size: %d", bLen) + } + b.data = buf[:bLen] + b.zdata = buf[:bLen] + } + if _, err := io.ReadFull(z.src, b.zdata); err != nil { + return err + } + + if z.BlockChecksum { + if err := binary.Read(z.src, binary.LittleEndian, &b.checksum); err != nil { + return err + } + xxh := hashPool.Get() + defer hashPool.Put(xxh) + xxh.Write(b.zdata) + if h := xxh.Sum32(); h != b.checksum { + return fmt.Errorf("lz4.Read: invalid block checksum: got %x expected %x", h, b.checksum) + } + } + + return nil +} + +// decompressBlock decompresses a frame block. +// In case of an error, the block err is set with it and abort is set to 1. 
+func (z *Reader) decompressBlock(b *block, abort *uint32) { + if abort != nil { + defer z.wg.Done() + } + if b.compressed { + n := len(z.window) + m, err := UncompressBlock(b.zdata, b.data, n) + if err != nil { + if abort != nil { + atomic.StoreUint32(abort, 1) + } + b.err = err + return + } + b.data = b.data[n : n+m] + } + atomic.AddInt64(&z.Pos, int64(len(b.data))) +} + +// close validates the frame checksum (if any) and checks the next frame (if any). +func (z *Reader) close() error { + if !z.NoChecksum { + var checksum uint32 + if err := binary.Read(z.src, binary.LittleEndian, &checksum); err != nil { + return err + } + if checksum != z.checksum.Sum32() { + return fmt.Errorf("lz4.Read: invalid frame checksum: got %x expected %x", z.checksum.Sum32(), checksum) + } + } + + // get ready for the next concatenated frame, but do not change the position + pos := z.Pos + z.Reset(z.src) + z.Pos = pos + + // since multiple frames can be concatenated, check for another one + return z.readHeader(false) +} + +// Reset discards the Reader's state and makes it equivalent to the +// result of its original state from NewReader, but reading from r instead. +// This permits reusing a Reader rather than allocating a new one. +func (z *Reader) Reset(r io.Reader) { + z.Header = Header{} + z.Pos = 0 + z.src = r + z.checksum.Reset() + z.data = nil + z.window = nil +} + +// WriteTo decompresses the data from the underlying io.Reader and writes it to the io.Writer. +// Returns the number of bytes written. +func (z *Reader) WriteTo(w io.Writer) (n int64, err error) { + cpus := runtime.GOMAXPROCS(0) + var buf []byte + + // The initial buffer being nil, the first Read will be only read the compressed frame options. + // The buffer can then be sized appropriately to support maximum concurrency decompression. + // If multiple frames are concatenated, Read() will return with no data decompressed but with + // potentially changed options. The buffer will be resized accordingly, always trying to + // maximize concurrency. + for { + nsize := 0 + // the block max size can change if multiple streams are concatenated. + // Check it after every Read(). + if z.BlockDependency { + // in case of dependency, we cannot decompress concurrently, + // so allocate the minimum buffer + window size + nsize = len(z.window) + z.BlockMaxSize + } else { + // if no dependency, allocate a buffer large enough for concurrent decompression + nsize = cpus * z.BlockMaxSize + } + if nsize != len(buf) { + buf = make([]byte, nsize) + } + + m, er := z.Read(buf) + if er != nil && er != io.EOF { + return n, er + } + m, err = w.Write(buf[:m]) + n += int64(m) + if err != nil || er == io.EOF { + return + } + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/writer.go new file mode 100644 index 00000000..11082f5a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/lz4/writer.go @@ -0,0 +1,383 @@ +package lz4 + +import ( + "encoding/binary" + "fmt" + "hash" + "io" + "runtime" +) + +// Writer implements the LZ4 frame encoder. 
+type Writer struct { + Header + dst io.Writer + checksum hash.Hash32 // frame checksum + data []byte // data to be compressed, only used when dealing with block dependency as we need 64Kb to work with + window []byte // last 64KB of decompressed data (block dependency) + blockMaxSize buffer + + zbCompressBuf []byte // buffer for compressing lz4 blocks + writeSizeBuf []byte // four-byte slice for writing checksums and sizes in writeblock + hashTable []hashEntry + currentGeneration uint +} + +// NewWriter returns a new LZ4 frame encoder. +// No access to the underlying io.Writer is performed. +// The supplied Header is checked at the first Write. +// It is ok to change it before the first Write but then not until a Reset() is performed. +func NewWriter(dst io.Writer) *Writer { + return &Writer{ + dst: dst, + checksum: hashPool.Get(), + Header: Header{ + BlockMaxSize: 4 << 20, + }, + hashTable: make([]hashEntry, hashTableSize), + writeSizeBuf: make([]byte, 4), + } +} + +// writeHeader builds and writes the header (magic+header) to the underlying io.Writer. +func (z *Writer) writeHeader() error { + // Default to 4Mb if BlockMaxSize is not set + if z.Header.BlockMaxSize == 0 { + z.Header.BlockMaxSize = 4 << 20 + } + // the only option that need to be validated + bSize, ok := bsMapValue[z.Header.BlockMaxSize] + if !ok { + return fmt.Errorf("lz4: invalid block max size: %d", z.Header.BlockMaxSize) + } + + // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes + // Size and DictID are optional + var buf [19]byte + + // set the fixed size data: magic number, block max size and flags + binary.LittleEndian.PutUint32(buf[0:], frameMagic) + flg := byte(Version << 6) + if !z.Header.BlockDependency { + flg |= 1 << 5 + } + if z.Header.BlockChecksum { + flg |= 1 << 4 + } + if z.Header.Size > 0 { + flg |= 1 << 3 + } + if !z.Header.NoChecksum { + flg |= 1 << 2 + } + // if z.Header.Dict { + // flg |= 1 + // } + buf[4] = flg + buf[5] = bSize << 4 + + // current buffer size: magic(4) + flags(1) + block max size (1) + n := 6 + // optional items + if z.Header.Size > 0 { + binary.LittleEndian.PutUint64(buf[n:], z.Header.Size) + n += 8 + } + // if z.Header.Dict { + // binary.LittleEndian.PutUint32(buf[n:], z.Header.DictID) + // n += 4 + // } + + // header checksum includes the flags, block max size and optional Size and DictID + z.checksum.Write(buf[4:n]) + buf[n] = byte(z.checksum.Sum32() >> 8 & 0xFF) + z.checksum.Reset() + + // header ready, write it out + if _, err := z.dst.Write(buf[0 : n+1]); err != nil { + return err + } + z.Header.done = true + + // initialize buffers dependent on header info + z.zbCompressBuf = make([]byte, winSize+z.BlockMaxSize) + + return nil +} + +// Write compresses data from the supplied buffer into the underlying io.Writer. +// Write does not return until the data has been written. +// +// If the input buffer is large enough (typically in multiples of BlockMaxSize) +// the data will be compressed concurrently. +// +// Write never buffers any data unless in BlockDependency mode where it may +// do so until it has 64Kb of data, after which it never buffers any. 
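A corresponding compression sketch for the Writer defined above, assuming the same `github.com/pierrec/lz4` import path; the stdin/stdout plumbing and the particular Header options chosen here are illustrative, not prescribed by the package.

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/pierrec/lz4"
)

// compress writes src to dst as a single LZ4 frame.
func compress(dst io.Writer, src io.Reader) error {
	zw := lz4.NewWriter(dst)
	// Header options must be set before the first Write (or after a Reset).
	zw.Header = lz4.Header{
		BlockMaxSize:  256 << 10, // one of 64KB, 256KB, 1MB, 4MB
		BlockChecksum: true,
	}
	if _, err := io.Copy(zw, src); err != nil {
		return err
	}
	// Close writes the end-of-frame marker and the frame checksum.
	return zw.Close()
}

func main() {
	if err := compress(os.Stdout, os.Stdin); err != nil {
		log.Fatal(err)
	}
}
```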
+func (z *Writer) Write(buf []byte) (n int, err error) { + if !z.Header.done { + if err = z.writeHeader(); err != nil { + return + } + } + + if len(buf) == 0 { + return + } + + if !z.NoChecksum { + z.checksum.Write(buf) + } + + // with block dependency, require at least 64Kb of data to work with + // not having 64Kb only matters initially to setup the first window + bl := 0 + if z.BlockDependency && len(z.window) == 0 { + bl = len(z.data) + z.data = append(z.data, buf...) + if len(z.data) < winSize { + return len(buf), nil + } + buf = z.data + z.data = nil + } + + // Break up the input buffer into BlockMaxSize blocks, provisioning the left over block. + // Then compress into each of them concurrently if possible (no dependency). + var ( + zb block + wbuf = buf + zn = len(wbuf) / z.BlockMaxSize + zi = 0 + leftover = len(buf) % z.BlockMaxSize + ) + +loop: + for zi < zn { + if z.BlockDependency { + if zi == 0 { + // first block does not have the window + zb.data = append(z.window, wbuf[:z.BlockMaxSize]...) + zb.offset = len(z.window) + wbuf = wbuf[z.BlockMaxSize-winSize:] + } else { + // set the uncompressed data including the window from previous block + zb.data = wbuf[:z.BlockMaxSize+winSize] + zb.offset = winSize + wbuf = wbuf[z.BlockMaxSize:] + } + } else { + zb.data = wbuf[:z.BlockMaxSize] + wbuf = wbuf[z.BlockMaxSize:] + } + + goto write + } + + // left over + if leftover > 0 { + zb = block{data: wbuf} + if z.BlockDependency { + if zn == 0 { + zb.data = append(z.window, zb.data...) + zb.offset = len(z.window) + } else { + zb.offset = winSize + } + } + + leftover = 0 + goto write + } + + if z.BlockDependency { + if len(z.window) == 0 { + z.window = make([]byte, winSize) + } + // last buffer may be shorter than the window + if len(buf) >= winSize { + copy(z.window, buf[len(buf)-winSize:]) + } else { + copy(z.window, z.window[len(buf):]) + copy(z.window[len(buf)+1:], buf) + } + } + + return + +write: + zb = z.compressBlock(zb) + _, err = z.writeBlock(zb) + + written := len(zb.data) + if bl > 0 { + if written >= bl { + written -= bl + bl = 0 + } else { + bl -= written + written = 0 + } + } + + n += written + // remove the window in zb.data + if z.BlockDependency { + if zi == 0 { + n -= len(z.window) + } else { + n -= winSize + } + } + if err != nil { + return + } + zi++ + goto loop +} + +// compressBlock compresses a block. +func (z *Writer) compressBlock(zb block) block { + // compressed block size cannot exceed the input's + var ( + n int + err error + zbuf = z.zbCompressBuf + ) + if z.HighCompression { + n, err = CompressBlockHC(zb.data, zbuf, zb.offset) + } else { + n, err = compressGenerationalBlock(zb.data, zbuf, zb.offset, z.currentGeneration, z.hashTable) + z.currentGeneration++ + if z.currentGeneration == 0 { // wrapped around, reset table + z.hashTable = make([]hashEntry, hashTableSize) + } + } + + // compressible and compressed size smaller than decompressed: ok! + if err == nil && n > 0 && len(zb.zdata) < len(zb.data) { + zb.compressed = true + zb.zdata = zbuf[:n] + } else { + zb.zdata = zb.data[zb.offset:] + } + + if z.BlockChecksum { + xxh := hashPool.Get() + xxh.Write(zb.zdata) + zb.checksum = xxh.Sum32() + hashPool.Put(xxh) + } + + return zb +} + +// writeBlock writes a frame block to the underlying io.Writer (size, data). 
+func (z *Writer) writeBlock(zb block) (int, error) { + bLen := uint32(len(zb.zdata)) + if !zb.compressed { + bLen |= 1 << 31 + } + + n := 0 + + binary.LittleEndian.PutUint32(z.writeSizeBuf, bLen) + n, err := z.dst.Write(z.writeSizeBuf) + if err != nil { + return n, err + } + + m, err := z.dst.Write(zb.zdata) + n += m + if err != nil { + return n, err + } + + if z.BlockChecksum { + binary.LittleEndian.PutUint32(z.writeSizeBuf, zb.checksum) + m, err := z.dst.Write(z.writeSizeBuf) + n += m + + if err != nil { + return n, err + } + } + + return n, nil +} + +// Flush flushes any pending compressed data to the underlying writer. +// Flush does not return until the data has been written. +// If the underlying writer returns an error, Flush returns that error. +// +// Flush is only required when in BlockDependency mode and the total of +// data written is less than 64Kb. +func (z *Writer) Flush() error { + if len(z.data) == 0 { + return nil + } + + zb := z.compressBlock(block{data: z.data}) + if _, err := z.writeBlock(zb); err != nil { + return err + } + return nil +} + +// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. +func (z *Writer) Close() error { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return err + } + } + + // buffered data for the block dependency window + if z.BlockDependency && len(z.data) > 0 { + zb := block{data: z.data} + if _, err := z.writeBlock(z.compressBlock(zb)); err != nil { + return err + } + } + + if err := binary.Write(z.dst, binary.LittleEndian, uint32(0)); err != nil { + return err + } + if !z.NoChecksum { + if err := binary.Write(z.dst, binary.LittleEndian, z.checksum.Sum32()); err != nil { + return err + } + } + return nil +} + +// Reset clears the state of the Writer z such that it is equivalent to its +// initial state from NewWriter, but instead writing to w. +// No access to the underlying io.Writer is performed. +func (z *Writer) Reset(w io.Writer) { + z.Header = Header{} + z.dst = w + z.checksum.Reset() + z.data = nil + z.window = nil +} + +// ReadFrom compresses the data read from the io.Reader and writes it to the underlying io.Writer. +// Returns the number of bytes read. +// It does not close the Writer. +func (z *Writer) ReadFrom(r io.Reader) (n int64, err error) { + cpus := runtime.GOMAXPROCS(0) + buf := make([]byte, cpus*z.BlockMaxSize) + for { + m, er := io.ReadFull(r, buf) + n += int64(m) + if er == nil || er == io.ErrUnexpectedEOF || er == io.EOF { + if _, err = z.Write(buf[:m]); err != nil { + return + } + if er == nil { + continue + } + return + } + return n, er + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/pierrec/xxHash/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/xxHash/LICENSE new file mode 100644 index 00000000..c1418f3f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/xxHash/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2014, Pierre Curto +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +* Neither the name of xxHash nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/elastic/beats/vendor/github.com/pierrec/xxHash/README.md b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/xxHash/README.md new file mode 100644 index 00000000..aafd2843 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/xxHash/README.md @@ -0,0 +1,36 @@ +[![godoc](https://godoc.org/github.com/pierrec/xxHash?status.png)](https://godoc.org/github.com/pierrec/xxHash) +[![Build Status](https://travis-ci.org/pierrec/xxHash.svg?branch=master)](https://travis-ci.org/pierrec/xxHash) + +# Pure Go implementation of xxHash (32 and 64 bits versions) + +## Synopsis + +xxHash is a very fast hashing algorithm (see the details [here](https://github.com/Cyan4973/xxHash/)). +This package implements xxHash in pure [Go](http://www.golang.com). + + +## Usage + +This package follows the hash interfaces (hash.Hash32 and hash.Hash64). + +```go + import ( + "fmt" + + "github.com/pierrec/xxHash/xxHash32" + ) + + x := xxHash32.New(0xCAFE) // hash.Hash32 + x.Write([]byte("abc")) + x.Write([]byte("def")) + fmt.Printf("%x\n", x.Sum32()) + + x.Reset() + x.Write([]byte("abc")) + fmt.Printf("%x\n", x.Sum32()) +``` + +## Command line utility + +A simple command line utility is provided to hash files content under the xxhsum directory. + diff --git a/vendor/github.com/elastic/beats/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go new file mode 100644 index 00000000..411504e4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go @@ -0,0 +1,205 @@ +// Package xxHash32 implements the very fast xxHash hashing algorithm (32 bits version). +// (https://github.com/Cyan4973/xxHash/) +package xxHash32 + +import "hash" + +const ( + prime32_1 = 2654435761 + prime32_2 = 2246822519 + prime32_3 = 3266489917 + prime32_4 = 668265263 + prime32_5 = 374761393 +) + +type xxHash struct { + seed uint32 + v1 uint32 + v2 uint32 + v3 uint32 + v4 uint32 + totalLen uint64 + buf [16]byte + bufused int +} + +// New returns a new Hash32 instance. +func New(seed uint32) hash.Hash32 { + xxh := &xxHash{seed: seed} + xxh.Reset() + return xxh +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (xxh xxHash) Sum(b []byte) []byte { + h32 := xxh.Sum32() + return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) +} + +// Reset resets the Hash to its initial state. 
+func (xxh *xxHash) Reset() { + xxh.v1 = xxh.seed + prime32_1 + prime32_2 + xxh.v2 = xxh.seed + prime32_2 + xxh.v3 = xxh.seed + xxh.v4 = xxh.seed - prime32_1 + xxh.totalLen = 0 + xxh.bufused = 0 +} + +// Size returns the number of bytes returned by Sum(). +func (xxh *xxHash) Size() int { + return 4 +} + +// BlockSize gives the minimum number of bytes accepted by Write(). +func (xxh *xxHash) BlockSize() int { + return 1 +} + +// Write adds input bytes to the Hash. +// It never returns an error. +func (xxh *xxHash) Write(input []byte) (int, error) { + n := len(input) + m := xxh.bufused + + xxh.totalLen += uint64(n) + + r := len(xxh.buf) - m + if n < r { + copy(xxh.buf[m:], input) + xxh.bufused += len(input) + return n, nil + } + + p := 0 + if m > 0 { + // some data left from previous update + copy(xxh.buf[xxh.bufused:], input[:r]) + xxh.bufused += len(input) - r + + // fast rotl(13) + p32 := xxh.v1 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2 + xxh.v1 = (p32<<13 | p32>>19) * prime32_1 + p += 4 + p32 = xxh.v2 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2 + xxh.v2 = (p32<<13 | p32>>19) * prime32_1 + p += 4 + p32 = xxh.v3 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2 + xxh.v3 = (p32<<13 | p32>>19) * prime32_1 + p += 4 + p32 = xxh.v4 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2 + xxh.v4 = (p32<<13 | p32>>19) * prime32_1 + + p = r + xxh.bufused = 0 + } + + for n := n - 16; p <= n; { + p32 := xxh.v1 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2 + xxh.v1 = (p32<<13 | p32>>19) * prime32_1 + p += 4 + p32 = xxh.v2 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2 + xxh.v2 = (p32<<13 | p32>>19) * prime32_1 + p += 4 + p32 = xxh.v3 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2 + xxh.v3 = (p32<<13 | p32>>19) * prime32_1 + p += 4 + p32 = xxh.v4 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2 + xxh.v4 = (p32<<13 | p32>>19) * prime32_1 + p += 4 + } + + copy(xxh.buf[xxh.bufused:], input[p:]) + xxh.bufused += len(input) - p + + return n, nil +} + +// Sum32 returns the 32 bits Hash value. +func (xxh *xxHash) Sum32() uint32 { + h32 := uint32(xxh.totalLen) + if xxh.totalLen >= 16 { + h32 += ((xxh.v1 << 1) | (xxh.v1 >> 31)) + + ((xxh.v2 << 7) | (xxh.v2 >> 25)) + + ((xxh.v3 << 12) | (xxh.v3 >> 20)) + + ((xxh.v4 << 18) | (xxh.v4 >> 14)) + } else { + h32 += xxh.seed + prime32_5 + } + + p := 0 + n := xxh.bufused + for n := n - 4; p <= n; p += 4 { + h32 += (uint32(xxh.buf[p+3])<<24 | uint32(xxh.buf[p+2])<<16 | uint32(xxh.buf[p+1])<<8 | uint32(xxh.buf[p])) * prime32_3 + h32 = ((h32 << 17) | (h32 >> 15)) * prime32_4 + } + for ; p < n; p++ { + h32 += uint32(xxh.buf[p]) * prime32_5 + h32 = ((h32 << 11) | (h32 >> 21)) * prime32_1 + } + + h32 ^= h32 >> 15 + h32 *= prime32_2 + h32 ^= h32 >> 13 + h32 *= prime32_3 + h32 ^= h32 >> 16 + + return h32 +} + +// Checksum returns the 32bits Hash value. 
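A small sketch showing that the streaming hash and the one-shot Checksum function defined below produce the same value. The input string and seed are arbitrary; `New`, `Write`, `Sum32`, and `Checksum` are the functions provided by this vendored package.

```go
package main

import (
	"fmt"

	"github.com/pierrec/xxHash/xxHash32"
)

func main() {
	data := []byte("hello, xxHash")
	const seed = 0x2a

	// One-shot hashing.
	sum := xxHash32.Checksum(data, seed)

	// Streaming the same bytes in two Writes yields the same digest.
	h := xxHash32.New(seed)
	h.Write(data[:5])
	h.Write(data[5:])

	fmt.Printf("%08x %08x equal=%v\n", sum, h.Sum32(), sum == h.Sum32())
}
```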
+func Checksum(input []byte, seed uint32) uint32 { + n := len(input) + h32 := uint32(n) + + if n < 16 { + h32 += seed + prime32_5 + } else { + v1 := seed + prime32_1 + prime32_2 + v2 := seed + prime32_2 + v3 := seed + v4 := seed - prime32_1 + p := 0 + for p <= n-16 { + v1 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2 + v1 = (v1<<13 | v1>>19) * prime32_1 + p += 4 + v2 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2 + v2 = (v2<<13 | v2>>19) * prime32_1 + p += 4 + v3 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2 + v3 = (v3<<13 | v3>>19) * prime32_1 + p += 4 + v4 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2 + v4 = (v4<<13 | v4>>19) * prime32_1 + p += 4 + } + input = input[p:] + n -= p + h32 += ((v1 << 1) | (v1 >> 31)) + + ((v2 << 7) | (v2 >> 25)) + + ((v3 << 12) | (v3 >> 20)) + + ((v4 << 18) | (v4 >> 14)) + } + + p := 0 + for p <= n-4 { + h32 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_3 + h32 = ((h32 << 17) | (h32 >> 15)) * prime32_4 + p += 4 + } + for p < n { + h32 += uint32(input[p]) * prime32_5 + h32 = ((h32 << 11) | (h32 >> 21)) * prime32_1 + p++ + } + + h32 ^= h32 >> 15 + h32 *= prime32_2 + h32 ^= h32 >> 13 + h32 *= prime32_3 + h32 ^= h32 >> 16 + + return h32 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/pierrec/xxHash/xxHash64/xxHash64.go b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/xxHash/xxHash64/xxHash64.go new file mode 100644 index 00000000..2788e950 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/xxHash/xxHash64/xxHash64.go @@ -0,0 +1,249 @@ +// Package xxHash64 implements the very fast xxHash hashing algorithm (64 bits version). +// (https://github.com/Cyan4973/xxHash/) +package xxHash64 + +import "hash" + +const ( + prime64_1 = 11400714785074694791 + prime64_2 = 14029467366897019727 + prime64_3 = 1609587929392839161 + prime64_4 = 9650029242287828579 + prime64_5 = 2870177450012600261 +) + +type xxHash struct { + seed uint64 + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + totalLen uint64 + buf [32]byte + bufused int +} + +// New returns a new Hash64 instance. +func New(seed uint64) hash.Hash64 { + xxh := &xxHash{seed: seed} + xxh.Reset() + return xxh +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (xxh xxHash) Sum(b []byte) []byte { + h64 := xxh.Sum64() + return append(b, byte(h64), byte(h64>>8), byte(h64>>16), byte(h64>>24), byte(h64>>32), byte(h64>>40), byte(h64>>48), byte(h64>>56)) +} + +// Reset resets the Hash to its initial state. +func (xxh *xxHash) Reset() { + xxh.v1 = xxh.seed + prime64_1 + prime64_2 + xxh.v2 = xxh.seed + prime64_2 + xxh.v3 = xxh.seed + xxh.v4 = xxh.seed - prime64_1 + xxh.totalLen = 0 + xxh.bufused = 0 +} + +// Size returns the number of bytes returned by Sum(). +func (xxh *xxHash) Size() int { + return 8 +} + +// BlockSize gives the minimum number of bytes accepted by Write(). +func (xxh *xxHash) BlockSize() int { + return 1 +} + +// Write adds input bytes to the Hash. +// It never returns an error. 
+func (xxh *xxHash) Write(input []byte) (int, error) { + n := len(input) + m := xxh.bufused + + xxh.totalLen += uint64(n) + + r := len(xxh.buf) - m + if n < r { + copy(xxh.buf[m:], input) + xxh.bufused += len(input) + return n, nil + } + + p := 0 + if m > 0 { + // some data left from previous update + copy(xxh.buf[xxh.bufused:], input[:r]) + xxh.bufused += len(input) - r + + // fast rotl(31) + p64 := xxh.v1 + (uint64(xxh.buf[p+7])<<56|uint64(xxh.buf[p+6])<<48|uint64(xxh.buf[p+5])<<40|uint64(xxh.buf[p+4])<<32|uint64(xxh.buf[p+3])<<24|uint64(xxh.buf[p+2])<<16|uint64(xxh.buf[p+1])<<8|uint64(xxh.buf[p]))*prime64_2 + xxh.v1 = (p64<<31 | p64>>33) * prime64_1 + p += 8 + p64 = xxh.v2 + (uint64(xxh.buf[p+7])<<56|uint64(xxh.buf[p+6])<<48|uint64(xxh.buf[p+5])<<40|uint64(xxh.buf[p+4])<<32|uint64(xxh.buf[p+3])<<24|uint64(xxh.buf[p+2])<<16|uint64(xxh.buf[p+1])<<8|uint64(xxh.buf[p]))*prime64_2 + xxh.v2 = (p64<<31 | p64>>33) * prime64_1 + p += 8 + p64 = xxh.v3 + (uint64(xxh.buf[p+7])<<56|uint64(xxh.buf[p+6])<<48|uint64(xxh.buf[p+5])<<40|uint64(xxh.buf[p+4])<<32|uint64(xxh.buf[p+3])<<24|uint64(xxh.buf[p+2])<<16|uint64(xxh.buf[p+1])<<8|uint64(xxh.buf[p]))*prime64_2 + xxh.v3 = (p64<<31 | p64>>33) * prime64_1 + p += 8 + p64 = xxh.v4 + (uint64(xxh.buf[p+7])<<56|uint64(xxh.buf[p+6])<<48|uint64(xxh.buf[p+5])<<40|uint64(xxh.buf[p+4])<<32|uint64(xxh.buf[p+3])<<24|uint64(xxh.buf[p+2])<<16|uint64(xxh.buf[p+1])<<8|uint64(xxh.buf[p]))*prime64_2 + xxh.v4 = (p64<<31 | p64>>33) * prime64_1 + + p = r + xxh.bufused = 0 + } + + for n := n - 32; p <= n; { + p64 := xxh.v1 + (uint64(input[p+7])<<56|uint64(input[p+6])<<48|uint64(input[p+5])<<40|uint64(input[p+4])<<32|uint64(input[p+3])<<24|uint64(input[p+2])<<16|uint64(input[p+1])<<8|uint64(input[p]))*prime64_2 + xxh.v1 = (p64<<31 | p64>>33) * prime64_1 + p += 8 + p64 = xxh.v2 + (uint64(input[p+7])<<56|uint64(input[p+6])<<48|uint64(input[p+5])<<40|uint64(input[p+4])<<32|uint64(input[p+3])<<24|uint64(input[p+2])<<16|uint64(input[p+1])<<8|uint64(input[p]))*prime64_2 + xxh.v2 = (p64<<31 | p64>>33) * prime64_1 + p += 8 + p64 = xxh.v3 + (uint64(input[p+7])<<56|uint64(input[p+6])<<48|uint64(input[p+5])<<40|uint64(input[p+4])<<32|uint64(input[p+3])<<24|uint64(input[p+2])<<16|uint64(input[p+1])<<8|uint64(input[p]))*prime64_2 + xxh.v3 = (p64<<31 | p64>>33) * prime64_1 + p += 8 + p64 = xxh.v4 + (uint64(input[p+7])<<56|uint64(input[p+6])<<48|uint64(input[p+5])<<40|uint64(input[p+4])<<32|uint64(input[p+3])<<24|uint64(input[p+2])<<16|uint64(input[p+1])<<8|uint64(input[p]))*prime64_2 + xxh.v4 = (p64<<31 | p64>>33) * prime64_1 + p += 8 + } + + copy(xxh.buf[xxh.bufused:], input[p:]) + xxh.bufused += len(input) - p + + return n, nil +} + +// Sum64 returns the 64bits Hash value. 
+func (xxh *xxHash) Sum64() uint64 { + var h64 uint64 + if xxh.totalLen >= 32 { + h64 = ((xxh.v1 << 1) | (xxh.v1 >> 63)) + + ((xxh.v2 << 7) | (xxh.v2 >> 57)) + + ((xxh.v3 << 12) | (xxh.v3 >> 52)) + + ((xxh.v4 << 18) | (xxh.v4 >> 46)) + + xxh.v1 *= prime64_2 + h64 ^= ((xxh.v1 << 31) | (xxh.v1 >> 33)) * prime64_1 + h64 = h64*prime64_1 + prime64_4 + + xxh.v2 *= prime64_2 + h64 ^= ((xxh.v2 << 31) | (xxh.v2 >> 33)) * prime64_1 + h64 = h64*prime64_1 + prime64_4 + + xxh.v3 *= prime64_2 + h64 ^= ((xxh.v3 << 31) | (xxh.v3 >> 33)) * prime64_1 + h64 = h64*prime64_1 + prime64_4 + + xxh.v4 *= prime64_2 + h64 ^= ((xxh.v4 << 31) | (xxh.v4 >> 33)) * prime64_1 + h64 = h64*prime64_1 + prime64_4 + xxh.totalLen + } else { + h64 = xxh.seed + prime64_5 + xxh.totalLen + } + + p := 0 + n := xxh.bufused + for n := n - 8; p <= n; p += 8 { + p64 := (uint64(xxh.buf[p+7])<<56 | uint64(xxh.buf[p+6])<<48 | uint64(xxh.buf[p+5])<<40 | uint64(xxh.buf[p+4])<<32 | uint64(xxh.buf[p+3])<<24 | uint64(xxh.buf[p+2])<<16 | uint64(xxh.buf[p+1])<<8 | uint64(xxh.buf[p])) * prime64_2 + h64 ^= ((p64 << 31) | (p64 >> 33)) * prime64_1 + h64 = ((h64<<27)|(h64>>37))*prime64_1 + prime64_4 + } + if p+4 <= n { + h64 ^= (uint64(xxh.buf[p+3])<<24 | uint64(xxh.buf[p+2])<<16 | uint64(xxh.buf[p+1])<<8 | uint64(xxh.buf[p])) * prime64_1 + h64 = ((h64<<23)|(h64>>41))*prime64_2 + prime64_3 + p += 4 + } + for ; p < n; p++ { + h64 ^= uint64(xxh.buf[p]) * prime64_5 + h64 = ((h64 << 11) | (h64 >> 53)) * prime64_1 + } + + h64 ^= h64 >> 33 + h64 *= prime64_2 + h64 ^= h64 >> 29 + h64 *= prime64_3 + h64 ^= h64 >> 32 + + return h64 +} + +// Checksum returns the 64bits Hash value. +func Checksum(input []byte, seed uint64) uint64 { + n := len(input) + var h64 uint64 + + if n >= 32 { + v1 := seed + prime64_1 + prime64_2 + v2 := seed + prime64_2 + v3 := seed + v4 := seed - prime64_1 + p := 0 + for n := n - 32; p <= n; { + p64 := v1 + (uint64(input[p+7])<<56|uint64(input[p+6])<<48|uint64(input[p+5])<<40|uint64(input[p+4])<<32|uint64(input[p+3])<<24|uint64(input[p+2])<<16|uint64(input[p+1])<<8|uint64(input[p]))*prime64_2 + v1 = (p64<<31 | p64>>33) * prime64_1 + p += 8 + p64 = v2 + (uint64(input[p+7])<<56|uint64(input[p+6])<<48|uint64(input[p+5])<<40|uint64(input[p+4])<<32|uint64(input[p+3])<<24|uint64(input[p+2])<<16|uint64(input[p+1])<<8|uint64(input[p]))*prime64_2 + v2 = (p64<<31 | p64>>33) * prime64_1 + p += 8 + p64 = v3 + (uint64(input[p+7])<<56|uint64(input[p+6])<<48|uint64(input[p+5])<<40|uint64(input[p+4])<<32|uint64(input[p+3])<<24|uint64(input[p+2])<<16|uint64(input[p+1])<<8|uint64(input[p]))*prime64_2 + v3 = (p64<<31 | p64>>33) * prime64_1 + p += 8 + p64 = v4 + (uint64(input[p+7])<<56|uint64(input[p+6])<<48|uint64(input[p+5])<<40|uint64(input[p+4])<<32|uint64(input[p+3])<<24|uint64(input[p+2])<<16|uint64(input[p+1])<<8|uint64(input[p]))*prime64_2 + v4 = (p64<<31 | p64>>33) * prime64_1 + p += 8 + } + + h64 = ((v1 << 1) | (v1 >> 63)) + + ((v2 << 7) | (v2 >> 57)) + + ((v3 << 12) | (v3 >> 52)) + + ((v4 << 18) | (v4 >> 46)) + + v1 *= prime64_2 + h64 ^= ((v1 << 31) | (v1 >> 33)) * prime64_1 + h64 = h64*prime64_1 + prime64_4 + + v2 *= prime64_2 + h64 ^= ((v2 << 31) | (v2 >> 33)) * prime64_1 + h64 = h64*prime64_1 + prime64_4 + + v3 *= prime64_2 + h64 ^= ((v3 << 31) | (v3 >> 33)) * prime64_1 + h64 = h64*prime64_1 + prime64_4 + + v4 *= prime64_2 + h64 ^= ((v4 << 31) | (v4 >> 33)) * prime64_1 + h64 = h64*prime64_1 + prime64_4 + uint64(n) + + input = input[p:] + n -= p + } else { + h64 = seed + prime64_5 + uint64(n) + } + + p := 0 + for n := n - 8; p <= n; p += 8 { + 
p64 := (uint64(input[p+7])<<56 | uint64(input[p+6])<<48 | uint64(input[p+5])<<40 | uint64(input[p+4])<<32 | uint64(input[p+3])<<24 | uint64(input[p+2])<<16 | uint64(input[p+1])<<8 | uint64(input[p])) * prime64_2 + h64 ^= ((p64 << 31) | (p64 >> 33)) * prime64_1 + h64 = ((h64<<27)|(h64>>37))*prime64_1 + prime64_4 + } + if p+4 <= n { + h64 ^= (uint64(input[p+3])<<24 | uint64(input[p+2])<<16 | uint64(input[p+1])<<8 | uint64(input[p])) * prime64_1 + h64 = ((h64<<23)|(h64>>41))*prime64_2 + prime64_3 + p += 4 + } + for ; p < n; p++ { + h64 ^= uint64(input[p]) * prime64_5 + h64 = ((h64 << 11) | (h64 >> 53)) * prime64_1 + } + + h64 ^= h64 >> 33 + h64 *= prime64_2 + h64 ^= h64 >> 29 + h64 *= prime64_3 + h64 ^= h64 >> 32 + + return h64 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/pierrec/xxHash/xxhsum/main.go b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/xxHash/xxhsum/main.go new file mode 100644 index 00000000..b73158bd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/pierrec/xxHash/xxhsum/main.go @@ -0,0 +1,44 @@ +// Command line interface to the xxHash32 and xxHash64 packages. +// Usage: +// xxHash [-mode 0] [-seed 123] filename1 [filename2...] +// where +// mode: hash mode (0=32bits, 1=64bits) (default=1) +// seed: seed to be used (default=0) +package main + +import ( + "flag" + "fmt" + "hash" + "io" + "os" + + "github.com/pierrec/xxHash/xxHash32" + "github.com/pierrec/xxHash/xxHash64" +) + +func main() { + seed := flag.Uint64("seed", 0, "seed value") + mode := flag.Int("mode", 1, "hash mode: 0=32bits, 1=64bits") + flag.Parse() + + var xxh hash.Hash + if *mode == 0 { + xxh = xxHash32.New(uint32(*seed)) + } else { + xxh = xxHash64.New(*seed) + } + + // Process each file in sequence + for _, filename := range flag.Args() { + inputFile, err := os.Open(filename) + if err != nil { + continue + } + if _, err := io.Copy(xxh, inputFile); err == nil { + fmt.Printf("%x %s\n", xxh.Sum(nil), filename) + } + inputFile.Close() + xxh.Reset() + } +} diff --git a/vendor/github.com/elastic/beats/winlogbeat/_meta/kibana/visualization/Event-Levels.json b/vendor/github.com/elastic/beats/winlogbeat/_meta/kibana/visualization/Event-Levels.json index 36faf385..85edcdbf 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/_meta/kibana/visualization/Event-Levels.json +++ b/vendor/github.com/elastic/beats/winlogbeat/_meta/kibana/visualization/Event-Levels.json @@ -1,10 +1,10 @@ { - "visState": "{\"type\":\"table\",\"params\":{\"perPage\":5,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"level\",\"exclude\":{\"pattern\":\"\\\"\\\"\"},\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", + "visState": "{\"title\":\"Event Levels\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"level\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", "description": "", "title": "Event Levels", - "uiStateJSON": "{}", + "uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}", "version": 1, 
"kibanaSavedObjectMeta": { - "searchSourceJSON": "{\"index\":\"winlogbeat-*\",\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}},\"filter\":[]}" + "searchSourceJSON": "{\"index\":\"winlogbeat-*\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}" } } \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/winlogbeat/_meta/kibana/visualization/Sources.json b/vendor/github.com/elastic/beats/winlogbeat/_meta/kibana/visualization/Sources.json index 5e50e0f8..b80ae7fc 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/_meta/kibana/visualization/Sources.json +++ b/vendor/github.com/elastic/beats/winlogbeat/_meta/kibana/visualization/Sources.json @@ -1,10 +1,10 @@ { - "visState": "{\n \"type\": \"pie\",\n \"params\": {\n \"shareYAxis\": true,\n \"addTooltip\": true,\n \"addLegend\": true,\n \"isDonut\": false\n },\n \"aggs\": [\n {\n \"id\": \"1\",\n \"type\": \"count\",\n \"schema\": \"metric\",\n \"params\": {}\n },\n {\n \"id\": \"2\",\n \"type\": \"terms\",\n \"schema\": \"segment\",\n \"params\": {\n \"field\": \"source_name\",\n \"exclude\": {\n \"pattern\": \"\\\"\\\"\"\n },\n \"size\": 7,\n \"order\": \"desc\",\n \"orderBy\": \"1\"\n }\n }\n ],\n \"listeners\": {}\n}", + "visState": "{\"title\":\"Sources\",\"type\":\"pie\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"source_name\",\"size\":7,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", "description": "", - "title": "Event Sources", + "title": "Sources", "uiStateJSON": "{}", "version": 1, "kibanaSavedObjectMeta": { - "searchSourceJSON": "{\n \"index\": \"winlogbeat-*\",\n \"query\": {\n \"query_string\": {\n \"query\": \"*\",\n \"analyze_wildcard\": true\n }\n },\n \"filter\": []\n}" + "searchSourceJSON": "{\"index\":\"winlogbeat-*\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}" } } \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/getting-started.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/getting-started.asciidoc index 8ce6937f..b929904b 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/getting-started.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/getting-started.asciidoc @@ -57,7 +57,9 @@ execution policy for the current session to allow the script to run. For example Before starting Winlogbeat, you should look at the configuration options in the configuration file, for example `C:\Program Files\Winlogbeat\winlogbeat.yml`. -For more information about these options, see <>. +There’s also a full example configuration file called `winlogbeat.full.yml` that +shows all non-deprecated options. For more information about these options, see +<>. 
[[winlogbeat-configuration]] === Step 2: Configuring Winlogbeat diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/images/kibana-created-indexes.png b/vendor/github.com/elastic/beats/winlogbeat/docs/images/kibana-created-indexes.png index 232ae476..648fdc93 100644 Binary files a/vendor/github.com/elastic/beats/winlogbeat/docs/images/kibana-created-indexes.png and b/vendor/github.com/elastic/beats/winlogbeat/docs/images/kibana-created-indexes.png differ diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/images/kibana-navigation-vis.png b/vendor/github.com/elastic/beats/winlogbeat/docs/images/kibana-navigation-vis.png index 9bedaf04..820b97a9 100644 Binary files a/vendor/github.com/elastic/beats/winlogbeat/docs/images/kibana-navigation-vis.png and b/vendor/github.com/elastic/beats/winlogbeat/docs/images/kibana-navigation-vis.png differ diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/index.asciidoc index 645ada2d..53437fbc 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/index.asciidoc @@ -1,6 +1,6 @@ = Winlogbeat Reference -include::./version.asciidoc[] +include::../../libbeat/docs/version.asciidoc[] :libbeat: http://www.elastic.co/guide/en/beats/libbeat/{doc-branch} :packetbeat: http://www.elastic.co/guide/en/beats/packetbeat/{doc-branch} @@ -8,7 +8,7 @@ include::./version.asciidoc[] :filebeat: http://www.elastic.co/guide/en/beats/filebeat/{doc-branch} :winlogbeat: http://www.elastic.co/guide/en/beats/winlogbeat/{doc-branch} :elasticsearch: https://www.elastic.co/guide/en/elasticsearch/reference/{doc-branch} -:securitydoc: https://www.elastic.co/guide/en/x-pack/5.0 +:securitydoc: https://www.elastic.co/guide/en/x-pack/5.2 :version: {stack-version} :beatname_lc: winlogbeat :beatname_uc: Winlogbeat diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/securing-winlogbeat.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/securing-winlogbeat.asciidoc index df18577a..09f2c3f0 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/securing-winlogbeat.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/securing-winlogbeat.asciidoc @@ -9,12 +9,16 @@ The following topics describe how to secure communication between Winlogbeat and * <> * <> +//sets block macro for https.asciidoc included in next section + -- [[securing-communication-elasticsearch]] == Securing Communication With Elasticsearch include::../../libbeat/docs/https.asciidoc[] +//sets block macro for shared-ssl-logstash-config.asciidoc included in next section + [[configuring-ssl-logstash]] == Securing Communication With Logstash by Using SSL include::../../libbeat/docs/shared-ssl-logstash-config.asciidoc[] diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/troubleshooting.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/troubleshooting.asciidoc index cf3807f0..8a08e470 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/troubleshooting.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/troubleshooting.asciidoc @@ -9,6 +9,8 @@ If you have issues installing or running Winlogbeat, read the following tips: * <> * <> +//sets block macro for getting-help.asciidoc included in next section + -- [[getting-help]] @@ -16,6 +18,8 @@ If you have issues installing or running Winlogbeat, read the following tips: include::../../libbeat/docs/getting-help.asciidoc[] +//sets block macro for debugging.asciidoc 
included in next section + [[enable-winlogbeat-debugging]] == Debugging diff --git a/vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog.go b/vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog.go index 7caab618..69a3b8ab 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog.go +++ b/vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog.go @@ -4,6 +4,7 @@ package eventlog import ( "fmt" + "io" "syscall" "time" @@ -75,9 +76,10 @@ type winEventLog struct { maxRead int // Maximum number returned in one Read. lastRead uint64 // Record number of the last read event. - render func(event win.EvtHandle) (string, error) // Function for rendering the event to XML. - renderBuf []byte // Buffer used for rendering event. - cache *messageFilesCache // Cached mapping of source name to event message file handles. + render func(event win.EvtHandle, out io.Writer) error // Function for rendering the event to XML. + renderBuf []byte // Buffer used for rendering event. + outputBuf *sys.ByteBuffer // Buffer for receiving XML + cache *messageFilesCache // Cached mapping of source name to event message file handles. logPrefix string // String to prefix on log messages. eventMetadata common.EventMetadata // Field and tags to add to each event. @@ -132,20 +134,22 @@ func (l *winEventLog) Read() ([]Record, error) { var records []Record for _, h := range handles { - x, err := l.render(h) + l.outputBuf.Reset() + err := l.render(h, l.outputBuf) if bufErr, ok := err.(sys.InsufficientBufferError); ok { detailf("%s Increasing render buffer size to %d", l.logPrefix, bufErr.RequiredSize) l.renderBuf = make([]byte, bufErr.RequiredSize) - x, err = l.render(h) + l.outputBuf.Reset() + err = l.render(h, l.outputBuf) } - if err != nil && x == "" { + if err != nil && l.outputBuf.Len() == 0 { logp.Err("%s Dropping event with rendering error. %v", l.logPrefix, err) incrementMetric(dropReasons, err) continue } - r, err := l.buildRecordFromXML(x, err) + r, err := l.buildRecordFromXML(l.outputBuf.Bytes(), err) if err != nil { logp.Err("%s Dropping event. %v", l.logPrefix, err) incrementMetric(dropReasons, err) @@ -192,8 +196,8 @@ func (l *winEventLog) eventHandles(maxRead int) ([]win.EvtHandle, int, error) { } } -func (l *winEventLog) buildRecordFromXML(x string, recoveredErr error) (Record, error) { - e, err := sys.UnmarshalEventXML([]byte(x)) +func (l *winEventLog) buildRecordFromXML(x []byte, recoveredErr error) (Record, error) { + e, err := sys.UnmarshalEventXML(x) if err != nil { return Record{}, fmt.Errorf("Failed to unmarshal XML='%s'. 
diff --git a/vendor/github.com/elastic/beats/winlogbeat/sys/buffer.go b/vendor/github.com/elastic/beats/winlogbeat/sys/buffer.go
new file mode 100644
index 00000000..799a383d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/winlogbeat/sys/buffer.go
@@ -0,0 +1,46 @@
+package sys
+
+// ByteBuffer is an expandable buffer backed by a byte slice.
+type ByteBuffer struct {
+	buf    []byte
+	offset int
+}
+
+// NewByteBuffer creates a new ByteBuffer with an initial capacity of
+// initialSize.
+func NewByteBuffer(initialSize int) *ByteBuffer {
+	return &ByteBuffer{buf: make([]byte, initialSize)}
+}
+
+// Write appends the contents of p to the buffer, growing the buffer as needed.
+// The return value is the length of p; err is always nil.
+func (b *ByteBuffer) Write(p []byte) (int, error) {
+	if len(b.buf) < b.offset+len(p) {
+		// Create a buffer larger than needed so we don't spend lots of time
+		// allocating and copying.
+		spaceNeeded := len(b.buf) - b.offset + len(p)
+		largerBuf := make([]byte, 2*len(b.buf)+spaceNeeded)
+		copy(largerBuf, b.buf[:b.offset])
+		b.buf = largerBuf
+	}
+	n := copy(b.buf[b.offset:], p)
+	b.offset += n
+	return n, nil
+}
+
+// Reset resets the buffer to be empty. It retains the same underlying storage.
+func (b *ByteBuffer) Reset() {
+	b.offset = 0
+	b.buf = b.buf[:cap(b.buf)]
+}
+
+// Bytes returns a slice of length b.Len() holding the bytes that have been
+// written to the buffer.
+func (b *ByteBuffer) Bytes() []byte {
+	return b.buf[:b.offset]
+}
+
+// Len returns the number of bytes that have been written to the buffer.
+func (b *ByteBuffer) Len() int {
+	return b.offset
+}
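sys.ByteBuffer gives the event loop a reusable io.Writer, so each event's XML lands in the same backing array instead of a freshly allocated string. A short usage sketch against the API introduced above; the event strings and the process helper are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/elastic/beats/winlogbeat/sys"
)

func process(xml []byte) { fmt.Printf("%d bytes: %s\n", len(xml), xml) }

func main() {
	// One buffer, sized once, reused for every event in a batch.
	buf := sys.NewByteBuffer(1 << 14)

	events := []string{"<Event>1</Event>", "<Event>2</Event>"}
	for _, e := range events {
		buf.Reset()          // forget the previous event, keep the storage
		fmt.Fprint(buf, e)   // ByteBuffer satisfies io.Writer
		process(buf.Bytes()) // only valid until the next Reset/Write
	}
}
```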
diff --git a/vendor/github.com/elastic/beats/winlogbeat/sys/buffer_test.go b/vendor/github.com/elastic/beats/winlogbeat/sys/buffer_test.go
new file mode 100644
index 00000000..825d916f
--- /dev/null
+++ b/vendor/github.com/elastic/beats/winlogbeat/sys/buffer_test.go
@@ -0,0 +1,102 @@
+package sys
+
+import (
+	"bytes"
+	"io"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+var _ io.Writer = &ByteBuffer{}
+
+func TestByteBuffer(t *testing.T) {
+	input := "hello"
+	length := len(input)
+	buf := NewByteBuffer(1024)
+
+	n, err := buf.Write([]byte(input))
+	if err != nil {
+		t.Fatal(err)
+	}
+	assert.Equal(t, length, n)
+
+	assert.Equal(t, input, string(buf.Bytes()))
+	assert.Equal(t, length, len(buf.Bytes()))
+	assert.Equal(t, length, buf.Len())
+}
+
+func TestByteBufferGrow(t *testing.T) {
+	input := "hello"
+	length := len(input)
+	buf := NewByteBuffer(0)
+
+	n, err := buf.Write([]byte(input))
+	if err != nil {
+		t.Fatal(err)
+	}
+	assert.Equal(t, length, n)
+
+	assert.Equal(t, input, string(buf.Bytes()))
+	assert.Equal(t, length, len(buf.Bytes()))
+	assert.Equal(t, length, buf.Len())
+	assert.Equal(t, length, len(buf.buf))
+
+	n, err = buf.Write([]byte(input))
+	if err != nil {
+		t.Fatal(err)
+	}
+	assert.Equal(t, length, n)
+
+	assert.Equal(t, input+input, string(buf.Bytes()))
+	assert.Equal(t, 2*length, len(buf.Bytes()))
+	assert.Equal(t, 2*length, buf.Len())
+}
+
+func BenchmarkByteBuffer(b *testing.B) {
+	input := []byte("test writing this sentence to a buffer")
+
+	b.Run("byteBuffer", func(b *testing.B) {
+		buf := NewByteBuffer(1024)
+		b.ResetTimer()
+
+		for i := 0; i < b.N; i++ {
+			buf.Write(input)
+			buf.Bytes()
+			buf.Reset()
+		}
+	})
+
+	b.Run("bytes.Buffer", func(b *testing.B) {
+		buf := bytes.NewBuffer(make([]byte, 0, 1024))
+		b.ResetTimer()
+
+		for i := 0; i < b.N; i++ {
+			buf.Write(input)
+			buf.Bytes()
+			buf.Reset()
+		}
+	})
+}
+
+func BenchmarkByteBufferGrow(b *testing.B) {
+	b.Run("byteBuffer", func(b *testing.B) {
+		buf := NewByteBuffer(0)
+		b.ResetTimer()
+
+		for i := 0; i < b.N; i++ {
+			buf.Write([]byte("a"))
+			buf.Bytes()
+		}
+	})
+
+	b.Run("bytes.Buffer", func(b *testing.B) {
+		buf := bytes.NewBuffer(make([]byte, 0))
+		b.ResetTimer()
+
+		for i := 0; i < b.N; i++ {
+			buf.Write([]byte("a"))
+			buf.Bytes()
+		}
+	})
+}
diff --git a/vendor/github.com/elastic/beats/winlogbeat/sys/strings.go b/vendor/github.com/elastic/beats/winlogbeat/sys/strings.go
index 25f27a9d..ce9d4db9 100644
--- a/vendor/github.com/elastic/beats/winlogbeat/sys/strings.go
+++ b/vendor/github.com/elastic/beats/winlogbeat/sys/strings.go
@@ -1,34 +1,114 @@
 package sys
 import (
+	"errors"
 	"fmt"
+	"io"
 	"strings"
 	"unicode/utf16"
+	"unicode/utf8"
 )
-// UTF16BytesToString returns the Unicode code point sequence represented
-// by the UTF-16 buffer b.
-func UTF16BytesToString(b []byte) (string, int, error) {
-	if len(b)%2 != 0 {
-		return "", 0, fmt.Errorf("Slice must have an even length (length=%d)",
-			len(b))
+// The conditions replacementChar==unicode.ReplacementChar and
+// maxRune==unicode.MaxRune are verified in the tests.
+// Defining them locally avoids this package depending on package unicode.
+
+const (
+	replacementChar = '\uFFFD'     // Unicode replacement character
+	maxRune         = '\U0010FFFF' // Maximum valid Unicode code point.
+)
+
+const (
+	// 0xd800-0xdc00 encodes the high 10 bits of a pair.
+	// 0xdc00-0xe000 encodes the low 10 bits of a pair.
+	// the value is those 20 bits plus 0x10000.
+	surr1 = 0xd800
+	surr2 = 0xdc00
+	surr3 = 0xe000
+
+	surrSelf = 0x10000
+)
+
+var ErrBufferTooSmall = errors.New("buffer too small")
+
+func UTF16ToUTF8Bytes(in []byte, out io.Writer) error {
+	if len(in)%2 != 0 {
+		return fmt.Errorf("input buffer must have an even length (length=%d)", len(in))
 	}
-	offset := len(b)/2 + 2
-	s := make([]uint16, len(b)/2)
-	for i := range s {
-		s[i] = uint16(b[i*2]) + uint16(b[(i*2)+1])<<8
+	var runeBuf [4]byte
+	var v1, v2 uint16
+	for i := 0; i < len(in); i += 2 {
+		v1 = uint16(in[i]) | uint16(in[i+1])<<8
-		if s[i] == 0 {
-			s = s[0:i]
-			offset = i*2 + 2
-			break
+		switch {
+		case v1 < surr1, surr3 <= v1:
+			n := utf8.EncodeRune(runeBuf[:], rune(v1))
+			out.Write(runeBuf[:n])
+		case surr1 <= v1 && v1 < surr2 && len(in) > i+2:
+			v2 = uint16(in[i+2]) | uint16(in[i+3])<<8
+			if surr2 <= v2 && v2 < surr3 {
+				// valid surrogate sequence
+				r := utf16.DecodeRune(rune(v1), rune(v2))
+				n := utf8.EncodeRune(runeBuf[:], r)
+				out.Write(runeBuf[:n])
+			}
+			i += 2
+		default:
+			// invalid surrogate sequence
+			n := utf8.EncodeRune(runeBuf[:], replacementChar)
+			out.Write(runeBuf[:n])
 		}
 	}
+	return nil
+}
+
+// UTF16BytesToString returns a string that is decoded from the UTF-16 bytes.
+// The byte slice must be of even length otherwise an error will be returned.
+// The integer returned is the offset to the start of the next string with
+// buffer if it exists, otherwise -1 is returned.
+func UTF16BytesToString(b []byte) (string, int, error) {
+	if len(b)%2 != 0 {
+		return "", 0, fmt.Errorf("Slice must have an even length (length=%d)", len(b))
+	}
+
+	offset := -1
+
+	// Find the null terminator if it exists and re-slice the b.
+	if nullIndex := indexNullTerminator(b); nullIndex > 0 {
+		if len(b) > nullIndex+2 {
+			offset = nullIndex + 2
+		}
+
+		b = b[:nullIndex]
+	}
+
+	s := make([]uint16, len(b)/2)
+	for i := range s {
+		s[i] = uint16(b[i*2]) + uint16(b[(i*2)+1])<<8
+	}
+
 	return string(utf16.Decode(s)), offset, nil
 }
+// indexNullTerminator returns the index of a null terminator within a buffer
+// containing UTF-16 encoded data. If the null terminator is not found -1 is
+// returned.
+func indexNullTerminator(b []byte) int {
+	if len(b) < 2 {
+		return -1
+	}
+
+	for i := 0; i < len(b); i += 2 {
+		if b[i] == 0 && b[i+1] == 0 {
+			return i
+		}
+	}
+
+	return -1
+}
+
 // RemoveWindowsLineEndings replaces carriage return line feed (CRLF) with
 // line feed (LF) and trims any newline character that may exist at the end
 // of the string.
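UTF16ToUTF8Bytes streams UTF-16LE input straight into an io.Writer, decoding surrogate pairs as it goes. A small worked example that uses only the standard library to build the input bytes (U+10437 must be encoded as the surrogate pair D801 DC37):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"unicode/utf16"

	"github.com/elastic/beats/winlogbeat/sys"
)

func main() {
	// "a" is a single 16-bit unit; "𐐷" (U+10437) needs a surrogate pair.
	units := utf16.Encode([]rune("a𐐷"))
	fmt.Printf("UTF-16 units: %04x\n", units) // [0061 d801 dc37]

	// Lay the units out little-endian, as the Windows APIs return them.
	in := &bytes.Buffer{}
	binary.Write(in, binary.LittleEndian, units)

	// Decode straight into a writer; no intermediate string is allocated.
	out := &bytes.Buffer{}
	if err := sys.UTF16ToUTF8Bytes(in.Bytes(), out); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // a𐐷
}
```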
diff --git a/vendor/github.com/elastic/beats/winlogbeat/sys/strings_test.go b/vendor/github.com/elastic/beats/winlogbeat/sys/strings_test.go
new file mode 100644
index 00000000..f7e3443e
--- /dev/null
+++ b/vendor/github.com/elastic/beats/winlogbeat/sys/strings_test.go
@@ -0,0 +1,100 @@
+package sys
+
+import (
+	"bytes"
+	"encoding/binary"
+	"testing"
+	"unicode/utf16"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func toUTF16Bytes(in string) []byte {
+	var u16 []uint16 = utf16.Encode([]rune(in))
+	buf := &bytes.Buffer{}
+	binary.Write(buf, binary.LittleEndian, u16)
+	return buf.Bytes()
+}
+
+func TestUTF16BytesToString(t *testing.T) {
+	input := "abc白鵬翔\u145A6"
+	utf16Bytes := toUTF16Bytes(input)
+
+	output, _, err := UTF16BytesToString(utf16Bytes)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assert.Equal(t, input, output)
+}
+
+func TestUTF16BytesToStringOffset(t *testing.T) {
+	in := bytes.Join([][]byte{toUTF16Bytes("one"), toUTF16Bytes("two"), toUTF16Bytes("three")}, []byte{0, 0})
+
+	output, offset, err := UTF16BytesToString(in)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assert.Equal(t, "one", output)
+	assert.Equal(t, 8, offset)
+
+	in = in[offset:]
+	output, offset, err = UTF16BytesToString(in)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assert.Equal(t, "two", output)
+	assert.Equal(t, 8, offset)
+
+	in = in[offset:]
+	output, offset, err = UTF16BytesToString(in)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assert.Equal(t, "three", output)
+	assert.Equal(t, -1, offset)
+}
+
+func BenchmarkUTF16BytesToString(b *testing.B) {
+	utf16Bytes := toUTF16Bytes("A logon was attempted using explicit credentials.")
+
+	b.Run("simple_string", func(b *testing.B) {
+		b.ResetTimer()
+
+		for i := 0; i < b.N; i++ {
+			UTF16BytesToString(utf16Bytes)
+		}
+	})
+
+	// Buffer larger than the string.
+	b.Run("larger_buffer", func(b *testing.B) {
+		utf16Bytes = append(utf16Bytes, make([]byte, 2048)...)
+		b.ResetTimer()
+
+		for i := 0; i < b.N; i++ {
+			UTF16BytesToString(utf16Bytes)
+		}
+	})
+}
+
+func TestUTF16ToUTF8(t *testing.T) {
+	input := "abc白鵬翔\u145A6"
+	utf16Bytes := toUTF16Bytes(input)
+
+	outputBuf := &bytes.Buffer{}
+	err := UTF16ToUTF8Bytes(utf16Bytes, outputBuf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assert.Equal(t, []byte(input), outputBuf.Bytes())
+}
+
+func BenchmarkUTF16ToUTF8(b *testing.B) {
+	utf16Bytes := toUTF16Bytes("A logon was attempted using explicit credentials.")
+	outputBuf := &bytes.Buffer{}
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		UTF16ToUTF8Bytes(utf16Bytes, outputBuf)
+		outputBuf.Reset()
+	}
+}
diff --git a/vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/wineventlog_windows.go b/vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/wineventlog_windows.go
index 138c0da5..5da75e3a 100644
--- a/vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/wineventlog_windows.go
+++ b/vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/wineventlog_windows.go
@@ -156,10 +156,11 @@ func RenderEvent(
 	lang uint32,
 	renderBuf []byte,
 	pubHandleProvider func(string) sys.MessageFiles,
-) (string, error) {
+	out io.Writer,
+) error {
 	providerName, err := evtRenderProviderName(renderBuf, eventHandle)
 	if err != nil {
-		return "", err
+		return err
 	}
 	var publisherHandle uintptr
@@ -173,21 +174,21 @@ func RenderEvent(
 	}
 	// Only a single string is returned when rendering XML.
-	xml, err := FormatEventString(EvtFormatMessageXml,
-		eventHandle, providerName, EvtHandle(publisherHandle), lang, renderBuf)
+	err = FormatEventString(EvtFormatMessageXml,
+		eventHandle, providerName, EvtHandle(publisherHandle), lang, renderBuf, out)
 	// Recover by rendering the XML without the RenderingInfo (message string).
 	if err != nil {
 		// Do not try to recover from InsufficientBufferErrors because these
 		// can be retried with a larger buffer.
 		if _, ok := err.(sys.InsufficientBufferError); ok {
-			return "", err
+			return err
 		}
-		xml, err = RenderEventXML(eventHandle, renderBuf)
+		err = RenderEventXML(eventHandle, renderBuf, out)
 	}
-	return xml, err
+	return err
 }
 // RenderEventXML renders the event as XML. If the event is already rendered, as
@@ -195,24 +196,23 @@ func RenderEvent(
 // include the RenderingInfo (message). If the event is not rendered then the
 // XML will not include the message, and in this case RenderEvent should be
 // used.
-func RenderEventXML(eventHandle EvtHandle, renderBuf []byte) (string, error) {
+func RenderEventXML(eventHandle EvtHandle, renderBuf []byte, out io.Writer) error {
 	var bufferUsed, propertyCount uint32
 	err := _EvtRender(0, eventHandle, EvtRenderEventXml, uint32(len(renderBuf)),
 		&renderBuf[0], &bufferUsed, &propertyCount)
 	if err == ERROR_INSUFFICIENT_BUFFER {
-		return "", sys.InsufficientBufferError{err, int(bufferUsed)}
+		return sys.InsufficientBufferError{err, int(bufferUsed)}
 	}
 	if err != nil {
-		return "", err
+		return err
 	}
 	if int(bufferUsed) > len(renderBuf) {
-		return "", fmt.Errorf("Windows EvtRender reported that wrote %d bytes "+
+		return fmt.Errorf("Windows EvtRender reported that wrote %d bytes "+
 			"to the buffer, but the buffer can only hold %d bytes",
 			bufferUsed, len(renderBuf))
 	}
-	xml, _, err := sys.UTF16BytesToString(renderBuf[:bufferUsed])
-	return xml, err
+	return sys.UTF16ToUTF8Bytes(renderBuf[:bufferUsed], out)
 }
 // CreateBookmark creates a new handle to a bookmark. Close must be called on
@@ -299,24 +299,25 @@ func FormatEventString(
 	publisherHandle EvtHandle,
 	lang uint32,
 	buffer []byte,
-) (string, error) {
+	out io.Writer,
+) error {
 	// Open a publisher handle if one was not provided.
 	ph := publisherHandle
 	if ph == 0 {
 		ph, err := OpenPublisherMetadata(0, publisher, 0)
 		if err != nil {
-			return "", err
+			return err
 		}
 		defer _EvtClose(ph)
 	}
-	// Create a buffer if one was not provider.
+	// Create a buffer if one was not provided.
 	var bufferUsed uint32
 	if buffer == nil {
 		err := _EvtFormatMessage(ph, eventHandle, 0, 0, 0, messageFlag,
 			0, nil, &bufferUsed)
 		if err != nil && err != ERROR_INSUFFICIENT_BUFFER {
-			return "", err
+			return err
 		}
 		bufferUsed *= 2
@@ -328,16 +329,15 @@ func FormatEventString(
 		uint32(len(buffer)/2), &buffer[0], &bufferUsed)
 	bufferUsed *= 2
 	if err == ERROR_INSUFFICIENT_BUFFER {
-		return "", sys.InsufficientBufferError{err, int(bufferUsed)}
+		return sys.InsufficientBufferError{err, int(bufferUsed)}
 	}
 	if err != nil {
-		return "", err
+		return err
 	}
 	// This assumes there is only a single string value to read. This will
 	// not work to read keys (when messageFlag == EvtFormatMessageKeyword).
-	value, _, err := sys.UTF16BytesToString(buffer[0:bufferUsed])
-	return value, err
+	return sys.UTF16ToUTF8Bytes(buffer[:bufferUsed], out)
 }
 // offset reads a pointer value from the reader then calculates an offset from
@@ -400,8 +400,7 @@ func readString(buffer []byte, reader io.Reader) (string, error) {
 func evtRenderProviderName(renderBuf []byte, eventHandle EvtHandle) (string, error) {
 	var bufferUsed, propertyCount uint32
 	err := _EvtRender(providerNameContext, eventHandle, EvtRenderEventValues,
-		uint32(len(renderBuf)), &renderBuf[0], &bufferUsed,
-		&propertyCount)
+		uint32(len(renderBuf)), &renderBuf[0], &bufferUsed, &propertyCount)
 	if err == ERROR_INSUFFICIENT_BUFFER {
 		return "", sys.InsufficientBufferError{err, int(bufferUsed)}
 	}
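Taken together, these signature changes convert the UTF-16 output of the Windows calls once, directly into the caller's io.Writer, and RenderEvent now falls back to the raw XML renderer only for errors that a bigger buffer cannot fix. A rough sketch of that fallback decision, with the Windows-only calls replaced by hypothetical stand-ins so it compiles anywhere:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

// insufficientBuffer stands in for sys.InsufficientBufferError.
type insufficientBuffer struct{ RequiredSize int }

func (e insufficientBuffer) Error() string { return "insufficient buffer" }

// renderEvent mirrors the fallback logic of the new RenderEvent: prefer the
// fully formatted XML (with the message), fall back to raw XML on other
// errors, but let buffer-size errors bubble up so the caller can retry.
func renderEvent(formatXML, rawXML func(out io.Writer) error, out io.Writer) error {
	err := formatXML(out)
	if err != nil {
		if _, ok := err.(insufficientBuffer); ok {
			return err // retriable with a larger buffer; do not fall back
		}
		err = rawXML(out)
	}
	return err
}

func main() {
	// Stand-in renderers: formatting fails, the raw renderer succeeds.
	formatFails := func(out io.Writer) error { return errors.New("publisher metadata missing") }
	raw := func(out io.Writer) error { _, err := io.WriteString(out, "<Event/>"); return err }

	out := &bytes.Buffer{}
	err := renderEvent(formatFails, raw, out)
	fmt.Println(out.String(), err) // <Event/> <nil>
}
```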
diff --git a/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template-es2x.json b/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template-es2x.json
index c3e7ebcd..938a4889 100644
--- a/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template-es2x.json
+++ b/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template-es2x.json
@@ -7,7 +7,7 @@
       }
     },
     "_meta": {
-      "version": "5.1.2"
+      "version": "5.2.2"
     },
     "dynamic_templates": [
       {
diff --git a/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template.json b/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template.json
index a3ac73bf..48d00590 100644
--- a/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template.json
+++ b/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template.json
@@ -5,7 +5,7 @@
       "norms": false
     },
     "_meta": {
-      "version": "5.1.2"
+      "version": "5.2.2"
     },
     "dynamic_templates": [
       {
@@ -171,6 +171,7 @@
   },
   "order": 0,
   "settings": {
+    "index.mapping.total_fields.limit": 10000,
     "index.refresh_interval": "5s"
   },
   "template": "winlogbeat-*"
diff --git a/version.yml b/version.yml
new file mode 100644
index 00000000..637accb9
--- /dev/null
+++ b/version.yml
@@ -0,0 +1 @@
+version: "0.2.0"