Mirror of https://github.com/Icinga/icingabeat.git (synced 2025-04-08 17:15:05 +02:00)
Add initial files generated by cookiecutter
This commit is contained in:
parent f66a01fdda · commit 4d119a92a6
7  .gitignore  vendored  Normal file
@@ -0,0 +1,7 @@
/.idea
/build

.DS_Store
/icingabeat
/icingabeat.test
*.pyc
43  .travis.yml  Normal file
@@ -0,0 +1,43 @@
sudo: required
dist: trusty
services:
  - docker

language: go

go:
  - 1.6

os:
  - linux
  - osx

env:
  matrix:
    - TARGETS="check"
    - TARGETS="testsuite"

  global:
    # Cross-compile for amd64 only to speed up testing.
    - GOX_FLAGS="-arch amd64"

addons:
  apt:
    packages:
      - python-virtualenv

before_install:
  # Redo the travis setup but with the elastic/libbeat path. This is needed so the package path is correct
  - mkdir -p $HOME/gopath/src/github.com/icinga/icingabeat/
  - rsync -az ${TRAVIS_BUILD_DIR}/ $HOME/gopath/src/github.com/icinga/icingabeat/
  - export TRAVIS_BUILD_DIR=$HOME/gopath/src/github.com/icinga/icingabeat/
  - cd $HOME/gopath/src/github.com/icinga/icingabeat/

install:
  - true

script:
  - make $TARGETS

after_success:
  # Copy full.cov to coverage.txt because codecov.io requires this file
0  CONTRIBUTING.md  Normal file
45  Makefile  Normal file
@@ -0,0 +1,45 @@
BEATNAME=icingabeat
BEAT_DIR=github.com/icinga/icingabeat
SYSTEM_TESTS=false
TEST_ENVIRONMENT=false
ES_BEATS?=./vendor/github.com/elastic/beats
GOPACKAGES=$(shell glide novendor)
PREFIX?=.

# Path to the libbeat Makefile
-include $(ES_BEATS)/libbeat/scripts/Makefile

# Initial beat setup
.PHONY: setup
setup: copy-vendor
	make update

# Copy beats into vendor directory
.PHONY: copy-vendor
copy-vendor:
	mkdir -p vendor/github.com/elastic/
	cp -R ${GOPATH}/src/github.com/elastic/beats vendor/github.com/elastic/
	rm -rf vendor/github.com/elastic/beats/.git

.PHONY: git-init
git-init:
	git init
	git add README.md CONTRIBUTING.md
	git commit -m "Initial commit"
	git add LICENSE
	git commit -m "Add the LICENSE"
	git add .gitignore
	git commit -m "Add git settings"
	git add .
	git reset -- .travis.yml
	git commit -m "Add icingabeat"
	git add .travis.yml
	git commit -m "Add Travis CI"

# This is called by the beats packer before building starts
.PHONY: before-build
before-build:

# Collects all dependencies and then calls update
.PHONY: collect
collect:
119  README.md  Normal file
@@ -0,0 +1,119 @@
# Icingabeat

Welcome to Icingabeat.

Ensure that this folder is at the following location:
`${GOPATH}/github.com/icinga`

## Getting Started with Icingabeat

### Requirements

* [Golang](https://golang.org/dl/) 1.7

### Init Project
To get running with Icingabeat and install its dependencies, run the
following command:

```
make setup
```

It will create a clean git history for each major step. Note that you can always rewrite the history if you wish before pushing your changes.

To push Icingabeat to the git repository, run the following commands:

```
git remote set-url origin https://github.com/icinga/icingabeat
git push origin master
```

For further development, check out the [beat developer guide](https://www.elastic.co/guide/en/beats/libbeat/current/new-beat.html).

### Build

To build the binary for Icingabeat, run the command below. This will generate a binary
in the same directory with the name icingabeat.

```
make
```


### Run

To run Icingabeat with debugging output enabled, run:

```
./icingabeat -c icingabeat.yml -e -d "*"
```


### Test

To test Icingabeat, run the following command:

```
make testsuite
```

alternatively:
```
make unit-tests
make system-tests
make integration-tests
make coverage-report
```

The test coverage is reported in the folder `./build/coverage/`.

### Update

Each beat has a template for the mapping in Elasticsearch and documentation for the fields,
both automatically generated based on `etc/fields.yml`.
To generate `etc/icingabeat.template.json` and `etc/icingabeat.asciidoc`, run:

```
make update
```


### Cleanup

To clean the Icingabeat source code, run the following commands:

```
make fmt
make simplify
```

To clean up the build directory and generated artifacts, run:

```
make clean
```


### Clone

To clone Icingabeat from the git repository, run the following commands:

```
mkdir -p ${GOPATH}/github.com/icinga
cd ${GOPATH}/github.com/icinga
git clone https://github.com/icinga/icingabeat
```


For further development, check out the [beat developer guide](https://www.elastic.co/guide/en/beats/libbeat/current/new-beat.html).


## Packaging

The beat framework provides tools to cross-compile and package your beat for different platforms. This requires [docker](https://www.docker.com/) and vendoring as described above. To build packages of your beat, run the following command:

```
make package
```

This will fetch and create all images required for the build process. The whole process can take several minutes.
7  _meta/beat.yml  Normal file
@@ -0,0 +1,7 @@
################### Icingabeat Configuration Example #########################

############################# Icingabeat ######################################

icingabeat:
  # Defines how often an event is sent to the output
  period: 1s
9  _meta/fields.yml  Normal file
@@ -0,0 +1,9 @@
- key: icingabeat
  title: icingabeat
  description:
  fields:
    - name: counter
      type: long
      required: true
      description: >
        PLEASE UPDATE DOCUMENTATION
6  _meta/kibana/index-pattern/icingabeat.json  Normal file
@@ -0,0 +1,6 @@
{
    "fields": "[{\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"beat.name\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"beat.hostname\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"beat.version\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"@timestamp\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"date\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"tags\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"fields\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.provider\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.instance_id\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.machine_type\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.availability_zone\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.project_id\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.region\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"counter\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"number\", \"scripted\": false}]",
    "fieldFormatMap": "{\"@timestamp\": {\"id\": \"date\"}}",
    "timeFieldName": "@timestamp",
    "title": "icingabeat-*"
}
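A note on the index-pattern format above: the "fields" value is itself a complete JSON array encoded as a string, so reading it back takes two decode passes. A minimal Go sketch of that double decoding, using a trimmed, illustrative document rather than the full file:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Trimmed, illustrative document; the real file carries many more fields.
	raw := []byte(`{"fields": "[{\"name\": \"beat.name\", \"type\": \"string\"}]"}`)

	// First pass: the outer object, whose "fields" member is a string.
	var pattern struct {
		Fields string `json:"fields"`
	}
	if err := json.Unmarshal(raw, &pattern); err != nil {
		panic(err)
	}

	// Second pass: the string itself is a JSON array of field definitions.
	var fields []map[string]interface{}
	if err := json.Unmarshal([]byte(pattern.Fields), &fields); err != nil {
		panic(err)
	}
	fmt.Println(fields[0]["name"]) // beat.name
}
```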
60  beater/icingabeat.go  Normal file
@@ -0,0 +1,60 @@
package beater

import (
	"fmt"
	"time"

	"github.com/elastic/beats/libbeat/beat"
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/libbeat/publisher"

	"github.com/icinga/icingabeat/config"
)

// Icingabeat holds the beater state: the shutdown channel, the unpacked
// configuration, and the publisher client.
type Icingabeat struct {
	done   chan struct{}
	config config.Config
	client publisher.Client
}

// New creates the beater from the given configuration.
func New(b *beat.Beat, cfg *common.Config) (beat.Beater, error) {
	config := config.DefaultConfig
	if err := cfg.Unpack(&config); err != nil {
		return nil, fmt.Errorf("Error reading config file: %v", err)
	}

	bt := &Icingabeat{
		done:   make(chan struct{}),
		config: config,
	}
	return bt, nil
}

// Run publishes one event per configured period until Stop is called.
func (bt *Icingabeat) Run(b *beat.Beat) error {
	logp.Info("icingabeat is running! Hit CTRL-C to stop it.")

	bt.client = b.Publisher.Connect()
	ticker := time.NewTicker(bt.config.Period)
	for {
		select {
		case <-bt.done:
			return nil
		case <-ticker.C:
		}

		event := common.MapStr{
			"@timestamp": common.Time(time.Now()),
			"type":       b.Name,
			"event":      "icingabeat to come here",
		}
		bt.client.PublishEvent(event)
		logp.Info("Event sent")
	}
}

// Stop closes the publisher client and signals Run to return.
func (bt *Icingabeat) Stop() {
	bt.client.Close()
	close(bt.done)
}
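The Run/Stop pair in beater/icingabeat.go uses a standard Go shutdown idiom: Run blocks in a select on the ticker and the done channel, and Stop closes done, which unblocks the select and lets Run return nil. A standalone sketch of just that pattern (illustrative names and durations, not the libbeat API):

```go
package main

import (
	"fmt"
	"time"
)

// run mirrors Icingabeat.Run: block on the ticker, publish, and exit
// once the done channel is closed.
func run(done chan struct{}, period time.Duration) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-done:
			return
		case <-ticker.C:
			fmt.Println("tick: publish one event")
		}
	}
}

func main() {
	done := make(chan struct{})
	go run(done, 100*time.Millisecond)
	time.Sleep(350 * time.Millisecond)
	close(done) // what Stop does: every receive on done now unblocks
	time.Sleep(50 * time.Millisecond)
}
```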
14  config/config.go  Normal file
@@ -0,0 +1,14 @@
// Config is put into a different package to prevent cyclic imports in case
// it is needed in several locations

package config

import "time"

type Config struct {
	Period time.Duration `config:"period"`
}

var DefaultConfig = Config{
	Period: 1 * time.Second,
}
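For illustration, New in the beater copies config.DefaultConfig and then unpacks the user's settings over it, so an absent `period` keeps the 1s default. A minimal sketch of that override behaviour, with a plain map and time.ParseDuration standing in for go-ucfg's Unpack (hypothetical helper, not the real API):

```go
package main

import (
	"fmt"
	"time"
)

type Config struct {
	Period time.Duration
}

var DefaultConfig = Config{Period: 1 * time.Second}

// unpack is a stand-in for cfg.Unpack: copy the defaults, then let the
// user's raw settings override them.
func unpack(raw map[string]string) (Config, error) {
	cfg := DefaultConfig // struct copy, so the package-level defaults stay intact
	if v, ok := raw["period"]; ok {
		d, err := time.ParseDuration(v)
		if err != nil {
			return cfg, fmt.Errorf("Error reading config file: %v", err)
		}
		cfg.Period = d
	}
	return cfg, nil
}

func main() {
	cfg, _ := unpack(map[string]string{"period": "5s"})
	fmt.Println(cfg.Period) // 5s

	cfg, _ = unpack(map[string]string{})
	fmt.Println(cfg.Period) // 1s, the default
}
```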
3  config/config_test.go  Normal file
@@ -0,0 +1,3 @@
// +build !integration

package config
139  docs/fields.asciidoc  Normal file
@@ -0,0 +1,139 @@

////
This file is generated! See _meta/fields.yml and scripts/generate_field_docs.py
////

[[exported-fields]]
= Exported Fields

[partintro]

--
This document describes the fields that are exported by Icingabeat. They are
grouped in the following categories:

* <<exported-fields-beat>>
* <<exported-fields-cloud>>
* <<exported-fields-icingabeat>>

--
[[exported-fields-beat]]
== Beat Fields

Contains common beat fields available in all event types.



[float]
=== beat.name

The name of the Beat sending the log messages. If the Beat name is set in the configuration file, then that value is used. If it is not set, the hostname is used. To set the Beat name, use the `name` option in the configuration file.


[float]
=== beat.hostname

The hostname as returned by the operating system on which the Beat is running.


[float]
=== beat.version

The version of the beat that generated this event.


[float]
=== @timestamp

type: date

example: August 26th 2016, 12:35:53.332

format: date

required: True

The timestamp when the event log record was generated.


[float]
=== tags

Arbitrary tags that can be set per Beat and per transaction type.


[float]
=== fields

type: dict

Contains user configurable fields.


[[exported-fields-cloud]]
== Cloud Provider Metadata Fields

Metadata from cloud providers added by the add_cloud_metadata processor.



[float]
=== meta.cloud.provider

example: ec2

Name of the cloud provider. Possible values are ec2, gce, or digitalocean.


[float]
=== meta.cloud.instance_id

Instance ID of the host machine.


[float]
=== meta.cloud.machine_type

example: t2.medium

Machine type of the host machine.


[float]
=== meta.cloud.availability_zone

example: us-east-1c

Availability zone in which this host is running.


[float]
=== meta.cloud.project_id

example: project-x

Name of the project in Google Cloud.


[float]
=== meta.cloud.region

Region in which this host is running.


[[exported-fields-icingabeat]]
== icingabeat Fields

None


[float]
=== counter

type: long

required: True

PLEASE UPDATE DOCUMENTATION
5  docs/index.asciidoc  Normal file
@@ -0,0 +1,5 @@
= Icingabeat Docs

Welcome to the Icingabeat documentation.
2  docs/version.asciidoc  Normal file
@@ -0,0 +1,2 @@
:stack-version: 6.0.0-alpha1
:doc-branch: master
596  icingabeat.full.yml  Normal file
@@ -0,0 +1,596 @@
################### Icingabeat Configuration Example #########################

############################# Icingabeat ######################################

icingabeat:
  # Defines how often an event is sent to the output
  period: 1s

#================================ General ======================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
# If this option is not defined, the hostname is used.
#name:

# The tags of the shipper are included in their own field with each
# transaction published. Tags make it easy to group servers by different
# logical properties.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output. Fields can be scalar values, arrays, dictionaries, or any nested
# combination of these.
#fields:
#  env: staging

# If this option is set to true, the custom fields are stored as top-level
# fields in the output document instead of being grouped under a fields
# sub-dictionary. Default is false.
#fields_under_root: false

# Internal queue size for single events in processing pipeline
#queue_size: 1000

# The internal queue size for bulk events in the processing pipeline.
# Do not modify this value.
#bulk_queue_size: 0

# Sets the maximum number of CPUs that can be executing simultaneously. The
# default is the number of logical CPUs available in the system.
#max_procs:

#================================ Processors ===================================

# Processors are used to reduce the number of fields in the exported event or to
# enhance the event with external metadata. This section defines a list of
# processors that are applied one by one and the first one receives the initial
# event:
#
#   event -> filter1 -> event1 -> filter2 -> event2 ...
#
# The supported processors are drop_fields, drop_event, include_fields, and
# add_cloud_metadata.
#
# For example, you can use the following processors to keep the fields that
# contain CPU load percentages, but remove the fields that contain CPU ticks
# values:
#
#processors:
#- include_fields:
#    fields: ["cpu"]
#- drop_fields:
#    fields: ["cpu.user", "cpu.system"]
#
# The following example drops the events that have the HTTP response code 200:
#
#processors:
#- drop_event:
#    when:
#      equals:
#        http.code: 200
#
# The following example enriches each event with metadata from the cloud
# provider about the host machine. It works on EC2, GCE, and DigitalOcean.
#
#processors:
#- add_cloud_metadata:
#

#================================ Outputs ======================================

# Configure what outputs to use when sending the data collected by the beat.
# Multiple outputs may be used.

#-------------------------- Elasticsearch output -------------------------------
output.elasticsearch:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Array of hosts to connect to.
  # Scheme and port can be left out and will be set to the default (http and 9200)
  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
  hosts: ["localhost:9200"]

  # Set gzip compression level.
  #compression_level: 0

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"

  # Dictionary of HTTP parameters to pass within the url with index operations.
  #parameters:
    #param1: value1
    #param2: value2

  # Number of workers per Elasticsearch host.
  #worker: 1

  # Optional index name. The default is "icingabeat" plus date
  # and generates [icingabeat-]YYYY.MM.DD keys.
  #index: "icingabeat-%{+yyyy.MM.dd}"

  # Optional ingest node pipeline. By default no pipeline will be used.
  #pipeline: ""

  # Optional HTTP Path
  #path: "/elasticsearch"

  # Proxy server url
  #proxy_url: http://proxy:3128

  # The number of times a particular Elasticsearch index operation is attempted. If
  # the indexing operation doesn't succeed after this many retries, the events are
  # dropped. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
  # The default is 50.
  #bulk_max_size: 50

  # Configure http request timeout before failing a request to Elasticsearch.
  #timeout: 90

  # The number of seconds to wait for new events between two bulk API index requests.
  # If `bulk_max_size` is reached before this interval expires, additional bulk index
  # requests are made.
  #flush_interval: 1s

  # A template is used to set the mapping in Elasticsearch
  # By default template loading is enabled and the template is loaded.
  # These settings can be adjusted to load your own template or overwrite existing ones.

  # Set to false to disable template loading.
  #template.enabled: true

  # Template name. By default the template name is icingabeat.
  #template.name: "icingabeat"

  # Path to template file
  #template.path: "${path.config}/icingabeat.template.json"

  # Overwrite existing template
  #template.overwrite: false

  # If set to true, icingabeat checks the Elasticsearch version at connect time, and if it
  # is 2.x, it loads the file specified by the template.versions.2x.path setting. The
  # default is true.
  #template.versions.2x.enabled: true

  # Path to the Elasticsearch 2.x version of the template file.
  #template.versions.2x.path: "${path.config}/icingabeat.template-es2x.json"

  # Use SSL settings for HTTPS. Default is true.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # SSL configuration. By default it is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the Certificate Key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE based cipher suites
  #ssl.curve_types: []


#----------------------------- Logstash output ---------------------------------
#output.logstash:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # The Logstash hosts
  #hosts: ["localhost:5044"]

  # Number of workers per Logstash host.
  #worker: 1

  # Set gzip compression level.
  #compression_level: 3

  # Optionally load balance the events between the Logstash hosts
  #loadbalance: true

  # Number of batches to be sent asynchronously to Logstash while processing
  # new batches.
  #pipelining: 0

  # Optional index name. The default index name is set to the name of the beat
  # in all lowercase.
  #index: 'icingabeat'

  # SOCKS5 proxy server URL
  #proxy_url: socks5://user:password@socks5-server:2233

  # Resolve names locally when using a proxy server. Defaults to false.
  #proxy_use_local_resolver: false

  # Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the Certificate Key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE based cipher suites
  #ssl.curve_types: []

#------------------------------- Kafka output ----------------------------------
#output.kafka:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # The list of Kafka broker addresses from where to fetch the cluster metadata.
  # The cluster metadata contain the actual Kafka brokers events are published
  # to.
  #hosts: ["localhost:9092"]

  # The Kafka topic used for produced events. The setting can be a format string
  # using any event field. To set the topic from document type use `%{[type]}`.
  #topic: beats

  # The Kafka event key setting. Use format string to create unique event key.
  # By default no event key will be generated.
  #key: ''

  # The Kafka event partitioning strategy. Default hashing strategy is `hash`
  # using the `output.kafka.key` setting or randomly distributes events if
  # `output.kafka.key` is not configured.
  #partition.hash:
    # If enabled, events will only be published to partitions with reachable
    # leaders. Default is false.
    #reachable_only: false

    # Configure alternative event field names used to compute the hash value.
    # If empty `output.kafka.key` setting will be used.
    # Default value is empty list.
    #hash: []

  # Authentication details. Password is required if username is set.
  #username: ''
  #password: ''

  # Kafka version icingabeat is assumed to run against. Defaults to the oldest
  # supported stable version (currently version 0.8.2.0)
  #version: 0.8.2

  # Metadata update configuration. Metadata contains the leader information
  # that decides which broker to use when publishing.
  #metadata:
    # Max metadata request retry attempts when the cluster is in the middle of a
    # leader election. Defaults to 3 retries.
    #retry.max: 3

    # Waiting time between retries during leader elections. Default is 250ms.
    #retry.backoff: 250ms

    # Refresh metadata interval. Defaults to every 10 minutes.
    #refresh_frequency: 10m

  # The number of concurrent load-balanced Kafka output workers.
  #worker: 1

  # The number of times to retry publishing an event after a publishing failure.
  # After the specified number of retries, the events are typically dropped.
  # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
  # all events are published. Set max_retries to a value less than 0 to retry
  # until all events are published. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Kafka request. The default
  # is 2048.
  #bulk_max_size: 2048

  # The number of seconds to wait for responses from the Kafka brokers before
  # timing out. The default is 30s.
  #timeout: 30s

  # The maximum duration a broker will wait for the number of required ACKs. The
  # default is 10s.
  #broker_timeout: 10s

  # The number of messages buffered for each Kafka broker. The default is 256.
  #channel_buffer_size: 256

  # The keep-alive period for an active network connection. If 0s, keep-alives
  # are disabled. The default is 0 seconds.
  #keep_alive: 0

  # Sets the output compression codec. Must be one of none, snappy, or gzip. The
  # default is gzip.
  #compression: gzip

  # The maximum permitted size of JSON-encoded messages. Bigger messages will be
  # dropped. The default value is 1000000 (bytes). This value should be equal to
  # or less than the broker's message.max.bytes.
  #max_message_bytes: 1000000

  # The ACK reliability level required from broker. 0=no response, 1=wait for
  # local commit, -1=wait for all replicas to commit. The default is 1. Note:
  # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently
  # on error.
  #required_acks: 1

  # The number of seconds to wait for new events between two producer API calls.
  #flush_interval: 1s

  # The configurable ClientID used for logging, debugging, and auditing
  # purposes. The default is "beats".
  #client_id: beats

  # Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
  #ssl.enabled: true

  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the Certificate Key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE based cipher suites
  #ssl.curve_types: []

#------------------------------- Redis output ----------------------------------
#output.redis:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # The list of Redis servers to connect to. If load balancing is enabled, the
  # events are distributed to the servers in the list. If one server becomes
  # unreachable, the events are distributed to the reachable servers only.
  #hosts: ["localhost:6379"]

  # The Redis port to use if hosts does not contain a port number. The default
  # is 6379.
  #port: 6379

  # The name of the Redis list or channel the events are published to. The
  # default is icingabeat.
  #key: icingabeat

  # The password to authenticate with. The default is no authentication.
  #password:

  # The Redis database number where the events are published. The default is 0.
  #db: 0

  # The Redis data type to use for publishing events. If the data type is list,
  # the Redis RPUSH command is used. If the data type is channel, the Redis
  # PUBLISH command is used. The default value is list.
  #datatype: list

  # The number of workers to use for each host configured to publish events to
  # Redis. Use this setting along with the loadbalance option. For example, if
  # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each
  # host).
  #worker: 1

  # If set to true and multiple hosts or workers are configured, the output
  # plugin load balances published events onto all Redis hosts. If set to false,
  # the output plugin sends all events to only one host (determined at random)
  # and will switch to another host if the currently selected one becomes
  # unreachable. The default value is true.
  #loadbalance: true

  # The Redis connection timeout in seconds. The default is 5 seconds.
  #timeout: 5s

  # The number of times to retry publishing an event after a publishing failure.
  # After the specified number of retries, the events are typically dropped.
  # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
  # all events are published. Set max_retries to a value less than 0 to retry
  # until all events are published. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Redis request or pipeline.
  # The default is 2048.
  #bulk_max_size: 2048

  # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The
  # value must be a URL with a scheme of socks5://.
  #proxy_url:

  # This option determines whether Redis hostnames are resolved locally when
  # using a proxy. The default value is false, which means that name resolution
  # occurs on the proxy server.
  #proxy_use_local_resolver: false

  # Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the Certificate Key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE based cipher suites
  #ssl.curve_types: []


#------------------------------- File output -----------------------------------
#output.file:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Path to the directory where to save the generated files. The option is
  # mandatory.
  #path: "/tmp/icingabeat"

  # Name of the generated files. The default is `icingabeat` and it generates
  # files: `icingabeat`, `icingabeat.1`, `icingabeat.2`, etc.
  #filename: icingabeat

  # Maximum size in kilobytes of each file. When this size is reached, and on
  # every icingabeat restart, the files are rotated. The default value is 10240
  # kB.
  #rotate_every_kb: 10000

  # Maximum number of files under path. When this number of files is reached,
  # the oldest file is deleted and the rest are shifted from last to first. The
  # default is 7 files.
  #number_of_files: 7


#----------------------------- Console output ---------------------------------
#output.console:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Pretty print json event
  #pretty: false

#================================= Paths ======================================

# The home path for the icingabeat installation. This is the default base path
# for all other path settings and for miscellaneous files that come with the
# distribution (for example, the sample dashboards).
# If not set by a CLI flag or in the configuration file, the default for the
# home path is the location of the binary.
#path.home:

# The configuration path for the icingabeat installation. This is the default
# base path for configuration files, including the main YAML configuration file
# and the Elasticsearch template file. If not set by a CLI flag or in the
# configuration file, the default for the configuration path is the home path.
#path.config: ${path.home}

# The data path for the icingabeat installation. This is the default base path
# for all the files in which icingabeat needs to store its data. If not set by a
# CLI flag or in the configuration file, the default for the data path is a data
# subdirectory inside the home path.
#path.data: ${path.home}/data

# The logs path for an icingabeat installation. This is the default location for
# the Beat's log files. If not set by a CLI flag or in the configuration file,
# the default for the logs path is a logs subdirectory inside the home path.
#path.logs: ${path.home}/logs

#================================ Logging ======================================
# There are three options for the log output: syslog, file, stderr.
# On Windows systems, the log files are sent to the file output by default;
# on all other systems they are sent to syslog by default.

# Sets log level. The default log level is info.
# Available log levels are: critical, error, warning, info, debug
#logging.level: info

# Enable debug output for selected components. To enable all selectors use ["*"]
# Other available selectors are "beat", "publish", "service"
# Multiple selectors can be chained.
#logging.selectors: [ ]

# Send all logging output to syslog. The default is false.
#logging.to_syslog: true

# If enabled, icingabeat periodically logs its internal metrics that have changed
# in the last period. For each metric that changed, the delta from the value at
# the beginning of the period is logged. Also, the total values for
# all non-zero internal metrics are logged on shutdown. The default is true.
#logging.metrics.enabled: true

# The period after which to log the internal metrics. The default is 30s.
#logging.metrics.period: 30s

# Logging to rotating files. Set logging.to_files to false to disable logging to
# files.
logging.to_files: true
logging.files:
  # Configure the path where the logs are written. The default is the logs directory
  # under the home path (the binary location).
  #path: /var/log/icingabeat

  # The name of the files where the logs are written to.
  #name: icingabeat

  # Configure log file size limit. If limit is reached, log file will be
  # automatically rotated
  #rotateeverybytes: 10485760 # = 10MB

  # Number of rotated log files to keep. Oldest files will be deleted first.
  #keepfiles: 7
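The processors section of icingabeat.full.yml above describes a chain in which each processor receives the previous one's output (event -> filter1 -> event1 -> filter2 -> event2), and drop_event removes an event entirely. A minimal Go sketch of that chaining idea, purely illustrative — libbeat's real processor interface differs:

```go
package main

import "fmt"

// event is a simplified stand-in for libbeat's common.MapStr.
type event map[string]interface{}

// processor transforms an event; returning nil drops it, like drop_event.
type processor func(event) event

// chain applies processors in order: event -> p1 -> event1 -> p2 -> event2 ...
func chain(procs ...processor) processor {
	return func(e event) event {
		for _, p := range procs {
			if e = p(e); e == nil {
				return nil // event dropped, stop processing
			}
		}
		return e
	}
}

// dropFields builds a processor that removes the named fields.
func dropFields(fields ...string) processor {
	return func(e event) event {
		for _, f := range fields {
			delete(e, f)
		}
		return e
	}
}

func main() {
	pipeline := chain(dropFields("cpu.user", "cpu.system"))
	fmt.Println(pipeline(event{"cpu.user": 1, "cpu.system": 2, "cpu.load": 0.5}))
	// map[cpu.load:0.5]
}
```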
101  icingabeat.template-es2x.json  Normal file
@@ -0,0 +1,101 @@
{
  "mappings": {
    "_default_": {
      "_all": {
        "norms": {
          "enabled": false
        }
      },
      "_meta": {
        "version": "6.0.0-alpha1"
      },
      "dynamic_templates": [
        {
          "strings_as_keyword": {
            "mapping": {
              "ignore_above": 1024,
              "index": "not_analyzed",
              "type": "string"
            },
            "match_mapping_type": "string"
          }
        }
      ],
      "properties": {
        "@timestamp": {
          "type": "date"
        },
        "beat": {
          "properties": {
            "hostname": {
              "ignore_above": 1024,
              "index": "not_analyzed",
              "type": "string"
            },
            "name": {
              "ignore_above": 1024,
              "index": "not_analyzed",
              "type": "string"
            },
            "version": {
              "ignore_above": 1024,
              "index": "not_analyzed",
              "type": "string"
            }
          }
        },
        "counter": {
          "type": "long"
        },
        "meta": {
          "properties": {
            "cloud": {
              "properties": {
                "availability_zone": {
                  "ignore_above": 1024,
                  "index": "not_analyzed",
                  "type": "string"
                },
                "instance_id": {
                  "ignore_above": 1024,
                  "index": "not_analyzed",
                  "type": "string"
                },
                "machine_type": {
                  "ignore_above": 1024,
                  "index": "not_analyzed",
                  "type": "string"
                },
                "project_id": {
                  "ignore_above": 1024,
                  "index": "not_analyzed",
                  "type": "string"
                },
                "provider": {
                  "ignore_above": 1024,
                  "index": "not_analyzed",
                  "type": "string"
                },
                "region": {
                  "ignore_above": 1024,
                  "index": "not_analyzed",
                  "type": "string"
                }
              }
            }
          }
        },
        "tags": {
          "ignore_above": 1024,
          "index": "not_analyzed",
          "type": "string"
        }
      }
    }
  },
  "order": 0,
  "settings": {
    "index.refresh_interval": "5s"
  },
  "template": "icingabeat-*"
}
88  icingabeat.template.json  Normal file
@@ -0,0 +1,88 @@
{
  "mappings": {
    "_default_": {
      "_all": {
        "norms": false
      },
      "_meta": {
        "version": "6.0.0-alpha1"
      },
      "dynamic_templates": [
        {
          "strings_as_keyword": {
            "mapping": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "match_mapping_type": "string"
          }
        }
      ],
      "properties": {
        "@timestamp": {
          "type": "date"
        },
        "beat": {
          "properties": {
            "hostname": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "name": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "version": {
              "ignore_above": 1024,
              "type": "keyword"
            }
          }
        },
        "counter": {
          "type": "long"
        },
        "meta": {
          "properties": {
            "cloud": {
              "properties": {
                "availability_zone": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "instance_id": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "machine_type": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "project_id": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "provider": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "region": {
                  "ignore_above": 1024,
                  "type": "keyword"
                }
              }
            }
          }
        },
        "tags": {
          "ignore_above": 1024,
          "type": "keyword"
        }
      }
    }
  },
  "order": 0,
  "settings": {
    "index.refresh_interval": "5s"
  },
  "template": "icingabeat-*"
}
63  icingabeat.yml  Normal file
@@ -0,0 +1,63 @@
################### Icingabeat Configuration Example #########################

############################# Icingabeat ######################################

icingabeat:
  # Defines how often an event is sent to the output
  period: 1s

#================================ General =====================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:

# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output.
#fields:
#  env: staging

#================================ Outputs =====================================

# Configure what outputs to use when sending the data collected by the beat.
# Multiple outputs may be used.

#-------------------------- Elasticsearch output ------------------------------
output.elasticsearch:
  # Array of hosts to connect to.
  hosts: ["localhost:9200"]

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"

#----------------------------- Logstash output --------------------------------
#output.logstash:
  # The Logstash hosts
  #hosts: ["localhost:5044"]

  # Optional SSL. By default it is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

#================================ Logging =====================================

# Sets log level. The default log level is info.
# Available log levels are: critical, error, warning, info, debug
#logging.level: debug

# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
#logging.selectors: ["*"]
27  logs/icingabeat  Normal file
@@ -0,0 +1,27 @@
2016-12-02T11:47:19+01:00 INFO Setup Beat: icingabeat; Version: 6.0.0-alpha1
2016-12-02T11:47:19+01:00 INFO Metrics logging every 30s
2016-12-02T11:47:19+01:00 INFO Loading template enabled. Reading template file: /Users/bsheqa/go/src/github.com/icinga/icingabeat/icingabeat.template.json
2016-12-02T11:47:19+01:00 INFO Loading template enabled for Elasticsearch 2.x. Reading template file: /Users/bsheqa/go/src/github.com/icinga/icingabeat/icingabeat.template-es2x.json
2016-12-02T11:47:19+01:00 INFO Elasticsearch url: http://localhost:9200
2016-12-02T11:47:19+01:00 INFO Activated elasticsearch as output plugin.
2016-12-02T11:47:19+01:00 INFO Publisher name: Blerims-MacBook-Pro.local
2016-12-02T11:47:19+01:00 INFO Flush Interval set to: 1s
2016-12-02T11:47:19+01:00 INFO Max Bulk Size set to: 50
2016-12-02T11:47:19+01:00 INFO icingabeat start running.
2016-12-02T11:47:19+01:00 INFO icingabeat is running! Hit CTRL-C to stop it.
2016-12-02T11:47:20+01:00 INFO Event sent
2016-12-02T11:47:21+01:00 INFO Event sent
2016-12-02T11:47:21+01:00 ERR Connecting error publishing events (retrying): Get http://localhost:9200: dial tcp 127.0.0.1:9200: getsockopt: connection refused
2016-12-02T11:47:22+01:00 INFO Event sent
2016-12-02T11:47:22+01:00 ERR Connecting error publishing events (retrying): Get http://localhost:9200: dial tcp 127.0.0.1:9200: getsockopt: connection refused
2016-12-02T11:47:23+01:00 INFO Event sent
2016-12-02T11:47:24+01:00 INFO Event sent
2016-12-02T11:47:24+01:00 ERR Connecting error publishing events (retrying): Get http://localhost:9200: dial tcp 127.0.0.1:9200: getsockopt: connection refused
2016-12-02T11:47:25+01:00 INFO Event sent
2016-12-02T11:47:26+01:00 INFO Event sent
2016-12-02T11:47:27+01:00 INFO Event sent
2016-12-02T11:47:28+01:00 INFO Event sent
2016-12-02T11:47:28+01:00 ERR Connecting error publishing events (retrying): Get http://localhost:9200: dial tcp 127.0.0.1:9200: getsockopt: connection refused
2016-12-02T11:47:29+01:00 INFO Total non-zero values: libbeat.publisher.messages_in_worker_queues=9 libbeat.publisher.published_events=9
2016-12-02T11:47:29+01:00 INFO Uptime: 9.720728469s
2016-12-02T11:47:29+01:00 INFO icingabeat stopped.
16  main.go  Normal file
@@ -0,0 +1,16 @@
package main

import (
	"os"

	"github.com/elastic/beats/libbeat/beat"

	"github.com/icinga/icingabeat/beater"
)

func main() {
	err := beat.Run("icingabeat", "", beater.New)
	if err != nil {
		os.Exit(1)
	}
}
22  main_test.go  Normal file
@@ -0,0 +1,22 @@
package main

// This file is mandatory as otherwise the icingabeat.test binary is not generated correctly.

import (
	"flag"
	"testing"
)

var systemTest *bool

func init() {
	systemTest = flag.Bool("systemTest", false, "Set to true when running system tests")
}

// Test started when the test binary is started. Only calls main.
func TestSystem(t *testing.T) {

	if *systemTest {
		main()
	}
}
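A brief note on main_test.go: building with `go test -c` produces the icingabeat.test binary that the system tests reference via `beat_path` below, and passing the `-systemTest` flag defined in init() makes TestSystem call main(), so the whole beat runs inside the test binary (useful for coverage runs like the `go test -race -c -cover` invocation in the vendored AppVeyor config). A sketch of the invocation, in the same command style the README uses:

```
go test -c                      # produces ./icingabeat.test
./icingabeat.test -systemTest   # runs main() inside the test binary
```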
78  tests/system/config/icingabeat.yml.j2  Normal file
@@ -0,0 +1,78 @@
################### Beat Configuration #########################



############################# Output ##########################################

# Configure what outputs to use when sending the data collected by the beat.
# You can enable one or multiple outputs by setting the enabled option to true.
output:

  ### File as output
  file:
    # Enabling file output
    enabled: true

    # Path to the directory where to save the generated files. The option is mandatory.
    path: {{ output_file_path|default(beat.working_dir + "/output") }}


    # Name of the generated files. The default is `icingabeat` and it generates
    # files: `icingabeat`, `icingabeat.1`, `icingabeat.2`, etc.
    filename: "{{ output_file_filename|default("icingabeat") }}"

    # Maximum size in kilobytes of each file. When this size is reached, the files are
    # rotated. The default value is 10 MB.
    #rotate_every_kb: 10000

    # Maximum number of files under path. When this number of files is reached, the
    # oldest file is deleted and the rest are shifted from last to first. The default
    # is 7 files.
    #number_of_files: 7



############################# Beat #########################################

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
# If this option is not defined, the hostname is used.
#name:

# The tags of the shipper are included in their own field with each
# transaction published. Tags make it easy to group servers by different
# logical properties.
#tags: ["service-X", "web-tier"]



############################# Logging #########################################

#logging:
  # Send all logging output to syslog. On Windows the default is false, otherwise
  # the default is true.
  #to_syslog: true

  # Write all logging output to files. Beats automatically rotate files if the
  # configurable limit is reached.
  #to_files: false

  # Enable debug output for selected components.
  #selectors: []

  # Set log level
  #level: error

  #files:
    # The directory where the log files will be written to.
    #path: /var/log/icingabeat

    # The name of the files where the logs are written to.
    #name: icingabeat

    # Configure log file size limit. If limit is reached, log file will be
    # automatically rotated
    #rotateeverybytes: 10485760 # = 10MB

    # Number of rotated log files to keep. Oldest files will be deleted first.
    #keepfiles: 7
11  tests/system/icingabeat.py  Normal file
@@ -0,0 +1,11 @@
import sys
sys.path.append('../../vendor/github.com/elastic/beats/libbeat/tests/system')
from beat.beat import TestCase

class BaseTest(TestCase):

    @classmethod
    def setUpClass(self):
        self.beat_name = "icingabeat"
        self.build_path = "../../build/system-tests/"
        self.beat_path = "../../icingabeat.test"
0  tests/system/requirements.txt  Normal file
19  tests/system/test_base.py  Normal file
@@ -0,0 +1,19 @@
from icingabeat import BaseTest

import os


class Test(BaseTest):

    def test_base(self):
        """
        Basic test with exiting Icingabeat normally
        """
        self.render_config_template(
            path=os.path.abspath(self.working_dir) + "/log/*"
        )

        icingabeat_proc = self.start_beat()
        self.wait_until(lambda: self.log_contains("icingabeat is running"))
        exit_code = icingabeat_proc.kill_and_wait()
        assert exit_code == 0
107  vendor/github.com/elastic/beats/.appveyor.yml  generated  vendored  Normal file
@@ -0,0 +1,107 @@
# Version format
version: "{build}"

# Operating system (build VM template)
os: Windows Server 2012 R2

# Environment variables
environment:
  GOROOT: c:\go1.7.1
  GOPATH: c:\gopath
  PYWIN_DL: https://beats-files.s3.amazonaws.com/deps/pywin32-220.win32-py2.7.exe
  matrix:
  - PROJ: github.com\elastic\beats\metricbeat
    BEAT: metricbeat
  - PROJ: github.com\elastic\beats\filebeat
    BEAT: filebeat
  - PROJ: github.com\elastic\beats\winlogbeat
    BEAT: winlogbeat

# Custom clone folder (variables are not expanded here).
clone_folder: c:\gopath\src\github.com\elastic\beats

# Cache mingw install until appveyor.yml is modified.
cache:
- C:\ProgramData\chocolatey\bin -> .appveyor.yml
- C:\ProgramData\chocolatey\lib -> .appveyor.yml
- C:\go1.7.1 -> .appveyor.yml
- C:\tools\mingw64 -> .appveyor.yml
- C:\pywin_inst.exe -> .appveyor.yml

# Scripts that run after cloning repository
install:
- ps: c:\gopath\src\github.com\elastic\beats\libbeat\scripts\install-go.ps1 -version 1.7.1
- set PATH=%GOROOT%\bin;%PATH%
# AppVeyor installed mingw is 32-bit only.
- ps: >-
    if(!(Test-Path "C:\tools\mingw64\bin\gcc.exe")) {
      cinst mingw > mingw-install.txt
      Push-AppveyorArtifact mingw-install.txt
    }
- set PATH=C:\tools\mingw64\bin;%GOROOT%\bin;%PATH%
- set PATH=%GOPATH%\bin;%PATH%
- go install github.com/elastic/beats/vendor/github.com/pierrre/gotestcover
- go version
- go env
# Download the PyWin32 installer if it is not cached.
- ps: >-
    if(!(Test-Path "C:\pywin_inst.exe")) {
      (new-object net.webclient).DownloadFile("$env:PYWIN_DL", 'C:/pywin_inst.exe')
    }
- set PYTHONPATH=C:\Python27
- set PATH=%PYTHONPATH%;%PYTHONPATH%\Scripts;%PATH%
- python --version
- pip install jinja2 nose nose-timer PyYAML redis
- easy_install C:/pywin_inst.exe

# To run your custom scripts instead of automatic MSBuild
build_script:
# Compile
- appveyor AddCompilationMessage "Starting Compile"
- ps: cd $env:BEAT
- go build
- appveyor AddCompilationMessage "Compile Success" -FileName "%BEAT%.exe"

# To run your custom scripts instead of automatic tests
test_script:
# Unit tests
- ps: Add-AppveyorTest "Unit Tests" -Outcome Running
- mkdir build\coverage
- gotestcover -race -coverprofile=build/coverage/integration.cov github.com/elastic/beats/%BEAT%/...
- ps: Update-AppveyorTest "Unit Tests" -Outcome Passed
# System tests
- ps: Add-AppveyorTest "System tests" -Outcome Running
- go test -race -c -cover -covermode=atomic -coverpkg ./...
- ps: |
    if ($env:BEAT -eq "metricbeat") {
      cp .\_meta\fields.common.yml .\_meta\fields.generated.yml
      python .\scripts\fields_collector.py | out-file -append -encoding UTF8 -filepath .\_meta\fields.generated.yml
    }
- ps: cd tests/system
- nosetests --with-timer
- ps: Update-AppveyorTest "System tests" -Outcome Passed

after_test:
- ps: cd $env:GOPATH\src\$env:PROJ
- python ..\dev-tools\aggregate_coverage.py -o build\coverage\system.cov .\build\system-tests\run
- python ..\dev-tools\aggregate_coverage.py -o build\coverage\full.cov .\build\coverage
- go tool cover -html=build\coverage\full.cov -o build\coverage\full.html
- ps: Push-AppveyorArtifact build\coverage\full.cov
- ps: Push-AppveyorArtifact build\coverage\full.html
# Upload coverage report.
- "SET PATH=C:\\Python34;C:\\Python34\\Scripts;%PATH%"
- pip install codecov
- ps: cd $env:GOPATH\src\github.com\elastic\beats
- codecov -X gcov -f "%BEAT%\build\coverage\full.cov"

# Executes for both successful and failed builds
on_finish:
- ps: cd $env:GOPATH\src\$env:PROJ
- 7z a -r system-tests-output.zip build\system-tests\run
- ps: Push-AppveyorArtifact system-tests-output.zip

# To disable deployment
deploy: off

# Notifications should only be setup using the AppVeyor UI so that
# forks can be created without inheriting the settings.
27
vendor/github.com/elastic/beats/.editorconfig
generated
vendored
Normal file
@ -0,0 +1,27 @@
# See: http://editorconfig.org
root = true

[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true

[*.json]
indent_size = 4
indent_style = space

[*.py]
indent_style = space
indent_size = 4

[*.yml]
indent_style = space
indent_size = 2

[Makefile]
indent_style = tab

[Vagrantfile]
indent_size = 2
indent_style = space
6
vendor/github.com/elastic/beats/.gitattributes
generated
vendored
Normal file
@ -0,0 +1,6 @@
CHANGELOG.md merge=union
CHANGELOG.asciidoc merge=union

# Keep these file types as CRLF (Windows).
*.bat text eol=crlf
*.cmd text eol=crlf
11
vendor/github.com/elastic/beats/.github/ISSUE_TEMPLATE.md
generated
vendored
Normal file
@ -0,0 +1,11 @@
Please post all questions and issues on https://discuss.elastic.co/c/beats
before opening a Github Issue. Your questions will reach a wider audience there,
and if we confirm that there is a bug, then you can open a new issue.

For security vulnerabilities please only send reports to security@elastic.co.
See https://www.elastic.co/community/security for more information.

For confirmed bugs, please report:
- Version:
- Operating System:
- Steps to Reproduce:
28
vendor/github.com/elastic/beats/.gitignore
generated
vendored
Normal file
@ -0,0 +1,28 @@
# Directories
/.vagrant
/.idea
/build
/*/data
/*/logs
/*/_meta/kibana/index-pattern

# Files
.DS_Store
/glide.lock
/beats.iml
*.dev.yml
*.generated.yml

# Editor swap files
*.swp
*.swo
*.swn

# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
*.exe
*.test
*.prof
*.pyc
102
vendor/github.com/elastic/beats/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,102 @@
sudo: required
dist: trusty
services:
  - docker

language: go

go:
  - 1.7.1

# Make sure project can also be built on travis for clones of the repo
go_import_path: github.com/elastic/beats

os:
  - linux
  - osx

env:
  matrix:
    - TARGETS="check"
    - TARGETS="-C filebeat testsuite"
    - TARGETS="-C heartbeat testsuite"
    - TARGETS="-C libbeat testsuite"
    - TARGETS="-C metricbeat testsuite"
    - TARGETS="-C packetbeat testsuite"
    - TARGETS="-C libbeat crosscompile"
    - TARGETS="-C metricbeat crosscompile"
    - TARGETS="-C winlogbeat crosscompile"
    - TARGETS="-C libbeat/dashboards"
    - TARGETS="-C generate/metricbeat/metricset test"
    - TARGETS="-C generate/beat test"

  global:
    # Cross-compile for amd64 only to speed up testing.
    - GOX_FLAGS="-arch amd64"
    - DOCKER_COMPOSE_VERSION: 1.8.1

matrix:
  exclude:
    - os: osx
      env: TARGETS="check"
    - os: osx
      env: TARGETS="-C filebeat crosscompile"
    - os: osx
      env: TARGETS="-C libbeat crosscompile"
    - os: osx
      env: TARGETS="-C metricbeat crosscompile"
    - os: osx
      env: TARGETS="-C winlogbeat crosscompile"
    - os: osx
      env: TARGETS="-C libbeat testsuite"
    - os: osx
      env: TARGETS="-C heartbeat testsuite"
    - os: osx
      env: TARGETS="-C metricbeat testsuite"
    - os: osx
      env: TARGETS="-C libbeat/dashboards"
    - os: osx
      env: TARGETS="-C generate/metricbeat/metricset test"
    - os: osx
      env: TARGETS="-C generate/beat test"
  fast_finish: true
  allow_failures:
    - env: TARGETS="-C libbeat crosscompile"
    - env: TARGETS="-C filebeat crosscompile"

addons:
  apt:
    packages:
      - python-virtualenv
      - libpcap-dev
      - geoip-database

before_install:
  # Update to most recent docker version
  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
      sudo apt-get update;
      sudo apt-cache search docker;
      sudo apt-get -o Dpkg::Options::="--force-confnew" install -y docker-engine;
    fi
  # Docker-compose installation
  - sudo rm /usr/local/bin/docker-compose || true
  - curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
  - chmod +x docker-compose
  - sudo mv docker-compose /usr/local/bin

install:
  - true

script:
  - make $TARGETS

notifications:
  slack:
    rooms:
      secure: "e25J5puEA31dOooTI4T+K+zrTs8XeWIGq2cgmiPt9u/g7eqWeQj1UJnVsr8GOu1RPDyuJZJHXqfrvuOYJTdHzXbwjD0JTbwwVVZMkkZW2SWZHG46HCXPiucjWXEr3hXJKBJDDpIx6VxrN7r17dejv1biQ8QuEFZfiB1H8kbH/ho="

after_success:
  # Copy full.cov to coverage.txt because codecov.io requires this file
  - test -f filebeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f filebeat/build/coverage/full.cov
  - test -f packetbeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f packetbeat/build/coverage/full.cov
  - test -f libbeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f libbeat/build/coverage/full.cov
  - test -f metricbeat/build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f metricbeat/build/coverage/full.cov
1229
vendor/github.com/elastic/beats/CHANGELOG.asciidoc
generated
vendored
Normal file
File diff suppressed because it is too large
116
vendor/github.com/elastic/beats/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,116 @@
Please post all questions and issues first on
[https://discuss.elastic.co/c/beats](https://discuss.elastic.co/c/beats)
before opening a Github Issue.

# Contributing to Beats

The Beats are open source and we love to receive contributions from our
community — you!

There are many ways to contribute, from writing tutorials or blog posts,
improving the documentation, submitting bug reports and feature requests, or
writing code to implement a whole new protocol.

If you have a bugfix or new feature that you would like to contribute, please
start by opening a topic on the [forums](https://discuss.elastic.co/c/beats).
It may be that somebody is already working on it, or that there are particular
issues that you should know about before implementing the change.

We enjoy working with contributors to get their code accepted. There are many
approaches to fixing a problem and it is important to find the best approach
before writing too much code.

The process for contributing to any of the Elastic repositories is similar.

## Contribution Steps

1. Please make sure you have signed our [Contributor License
   Agreement](https://www.elastic.co/contributor-agreement/). We are not
   asking you to assign copyright to us, but to give us the right to distribute
   your code without restriction. We ask this of all contributors in order to
   assure our users of the origin and continuing existence of the code. You
   only need to sign the CLA once.
2. Send a pull request! Push your changes to your fork of the repository and
   [submit a pull
   request](https://help.github.com/articles/using-pull-requests). In the pull
   request, describe what your changes do and mention any bugs/issues related
   to the pull request.

## Adding a new Beat

If you want to create a new Beat, please read our [developer
guide](https://www.elastic.co/guide/en/beats/libbeat/current/new-beat.html).
You don't need to submit the code to this repository. Most new Beats start in
their own repository and just make use of the libbeat packages. After you have
a working Beat that you'd like to share with others, open a PR to add it to our
list of [community
Beats](https://github.com/elastic/beats/blob/master/libbeat/docs/communitybeats.asciidoc).

## Setting up your dev environment

The Beats are Go programs, so install the latest version of
[golang](http://golang.org/) if you don't have it already. The current Go
version used for development is Golang 1.7.1.

The location where you clone is important. Please clone under the source
directory of your `GOPATH`. If you don't have `GOPATH` already set, you can
simply set it to your home directory (`export GOPATH=$HOME`).

    $ mkdir -p $GOPATH/src/github.com/elastic
    $ cd $GOPATH/src/github.com/elastic
    $ git clone https://github.com/elastic/beats.git

Then you can compile a particular Beat by using the Makefile. For example, for
Packetbeat:

    $ cd beats/packetbeat
    $ make

Some of the Beats might have extra development requirements, in which case
you'll find a CONTRIBUTING.md file in the Beat directory.

## Update scripts

The Beats use a variety of scripts based on Python to generate configuration
files and documentation. The command used for this is:

    $ make update

This command has the following dependencies:

* Python >= 2.7.9
* [virtualenv](https://virtualenv.pypa.io/en/latest/) for Python

Virtualenv can be installed with the command `easy_install virtualenv` or
`pip install virtualenv`. More details can be found
[here](https://virtualenv.pypa.io/en/latest/installation.html).

## Testing

You can run the whole testsuite with the following command:

    $ make testsuite

Running the testsuite has the following requirements:

* Python >= 2.7.9
* Docker >= 1.10.0
* Docker-compose >= 1.8.0

## Documentation

The documentation for each Beat is located under {beatname}/docs and is based
on asciidoc. After changing the docs, you should verify that the docs are
still building to avoid breaking the automated docs build. To build the docs
run `make docs`. If you want to preview the docs for a specific Beat, run
`make docs-preview` inside the folder for the Beat. This will automatically
open your browser with the docs for preview.

## Dependencies

To manage the `vendor/` folder we use
[glide](https://github.com/Masterminds/glide), which uses
[glide.yaml](glide.yaml) as a manifest file for the dependencies. Please see
the glide documentation on how to add or update vendored dependencies.
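As a quick sketch of the glide workflow described in the Dependencies section above (the exact subcommands and options are documented upstream; treat these as illustrative):

```
# add a new dependency to glide.yaml and fetch it into vendor/
glide get github.com/some/dependency

# update vendored packages to match glide.yaml
glide update
```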
15
vendor/github.com/elastic/beats/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,15 @@
FROM golang:1.7.1
MAINTAINER Nicolas Ruflin <ruflin@elastic.co>

RUN set -x && \
    apt-get update && \
    apt-get install -y netcat && \
    apt-get clean

COPY libbeat/scripts/docker-entrypoint.sh /entrypoint.sh

RUN mkdir -p /etc/pki/tls/certs
COPY testing/environments/docker/logstash/pki/tls/certs/logstash.crt /etc/pki/tls/certs/logstash.crt

# Create a copy of the repository inside the container.
COPY . /go/src/github.com/elastic/beats/
13
vendor/github.com/elastic/beats/LICENSE
generated
vendored
Normal file
@ -0,0 +1,13 @@
Copyright (c) 2012–2016 Elasticsearch <http://www.elastic.co>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
111
vendor/github.com/elastic/beats/Makefile
generated
vendored
Normal file
@ -0,0 +1,111 @@
BUILD_DIR=build
COVERAGE_DIR=${BUILD_DIR}/coverage
BEATS=packetbeat filebeat winlogbeat metricbeat heartbeat
PROJECTS=libbeat ${BEATS}
SNAPSHOT?=yes

# Runs complete testsuites (unit, system, integration) for all beats with coverage and race detection.
# Also it builds the docs and the generators
.PHONY: testsuite
testsuite:
	$(foreach var,$(PROJECTS),$(MAKE) -C $(var) testsuite || exit 1;)
	#$(MAKE) -C generate test

# Runs unit and system tests without coverage and race detection.
.PHONY: test
test:
	$(foreach var,$(PROJECTS),$(MAKE) -C $(var) test || exit 1;)

# Runs unit tests without coverage and race detection.
.PHONY: unit
unit:
	$(foreach var,$(PROJECTS),$(MAKE) -C $(var) unit || exit 1;)

.PHONY: coverage-report
coverage-report:
	mkdir -p ${COVERAGE_DIR}
	# Writes atomic mode on top of file
	echo 'mode: atomic' > ./${COVERAGE_DIR}/full.cov
	# Collects all coverage files and skips top line with mode
	-tail -q -n +2 ./filebeat/${COVERAGE_DIR}/*.cov >> ./${COVERAGE_DIR}/full.cov
	-tail -q -n +2 ./packetbeat/${COVERAGE_DIR}/*.cov >> ./${COVERAGE_DIR}/full.cov
	-tail -q -n +2 ./winlogbeat/${COVERAGE_DIR}/*.cov >> ./${COVERAGE_DIR}/full.cov
	-tail -q -n +2 ./libbeat/${COVERAGE_DIR}/*.cov >> ./${COVERAGE_DIR}/full.cov
	go tool cover -html=./${COVERAGE_DIR}/full.cov -o ${COVERAGE_DIR}/full.html

.PHONY: update
update:
	$(MAKE) -C libbeat collect
	$(foreach var,$(BEATS),$(MAKE) -C $(var) update || exit 1;)

.PHONY: clean
clean:
	rm -rf build
	$(foreach var,$(PROJECTS),$(MAKE) -C $(var) clean || exit 1;)
	$(MAKE) -C generate clean

# Cleans up the vendor directory from unnecessary files
# This should always be run after updating the dependencies
.PHONY: clean-vendor
clean-vendor:
	sh scripts/clean_vendor.sh

.PHONY: check
check:
	$(foreach var,$(PROJECTS),$(MAKE) -C $(var) check || exit 1;)
	# Validate that all updates were committed
	$(MAKE) update
	git update-index --refresh
	git diff-index --exit-code HEAD --

.PHONY: fmt
fmt:
	$(foreach var,$(PROJECTS),$(MAKE) -C $(var) fmt || exit 1;)

.PHONY: simplify
simplify:
	$(foreach var,$(PROJECTS),$(MAKE) -C $(var) simplify || exit 1;)

# Collects all dashboards and generates dashboard folder for https://github.com/elastic/beats-dashboards/tree/master/dashboards
.PHONY: beats-dashboards
beats-dashboards:
	mkdir -p build/dashboards
	$(foreach var,$(BEATS),cp -r $(var)/_meta/kibana/ build/dashboards/$(var) || exit 1;)

# Builds the documents for each beat
.PHONY: docs
docs:
	sh libbeat/scripts/build_docs.sh ${PROJECTS}

.PHONY: package
package: update beats-dashboards
	$(foreach var,$(BEATS),SNAPSHOT=$(SNAPSHOT) $(MAKE) -C $(var) package || exit 1;)

	# build the dashboards package
	echo "Start building the dashboards package"
	mkdir -p build/upload/
	BUILD_DIR=${shell pwd}/build SNAPSHOT=$(SNAPSHOT) $(MAKE) -C dev-tools/packer package-dashboards ${shell pwd}/build/upload/build_id.txt
	mv build/upload build/dashboards-upload

	# Copy build files over to top build directory
	mkdir -p build/upload/
	$(foreach var,$(BEATS),cp -r $(var)/build/upload/ build/upload/$(var) || exit 1;)
	cp -r build/dashboards-upload build/upload/dashboards

# Upload nightly builds to S3
.PHONY: upload-nightlies-s3
upload-nightlies-s3: all
	aws s3 cp --recursive --acl public-read build/upload s3://beats-nightlies

# Run after building to sign packages and publish to APT and YUM repos.
.PHONY: package-upload
upload-package:
	$(MAKE) -C dev-tools/packer deb-rpm-s3
	# You must export AWS_ACCESS_KEY=<AWS access> and export AWS_SECRET_KEY=<secret>
	# before running this make target.
	dev-tools/packer/docker/deb-rpm-s3/deb-rpm-s3.sh

.PHONY: release-upload
upload-release:
	aws s3 cp --recursive --acl public-read build/upload s3://download.elasticsearch.org/beats/
76
vendor/github.com/elastic/beats/README.md
generated
vendored
Normal file
@ -0,0 +1,76 @@
[Build Status](https://travis-ci.org/elastic/beats)
[Build status (AppVeyor)](https://ci.appveyor.com/project/elastic-beats/beats/branch/master)
[Go Report Card](http://goreportcard.com/report/elastic/beats)
[Coverage](https://codecov.io/github/elastic/beats?branch=master)

# Beats - The Lightweight Shippers of the Elastic Stack

The [Beats](https://www.elastic.co/products/beats) are lightweight data
shippers, written in Go, that you install on your servers to capture all sorts
of operational data (think of logs, metrics, or network packet data). The Beats
send the operational data to Elasticsearch, either directly or via Logstash, so
it can be visualized with Kibana.

By "lightweight", we mean that Beats have a small installation footprint, use
limited system resources, and have no runtime dependencies.

This repository contains
[libbeat](https://github.com/elastic/beats/tree/master/libbeat), our Go
framework for creating Beats, and all the officially supported Beats:

Beat | Description
--- | ---
[Filebeat](https://github.com/elastic/beats/tree/master/filebeat) | Tails and ships log files
[Metricbeat](https://github.com/elastic/beats/tree/master/metricbeat) | Fetches sets of metrics from the operating system and services
[Packetbeat](https://github.com/elastic/beats/tree/master/packetbeat) | Monitors the network and applications by sniffing packets
[Winlogbeat](https://github.com/elastic/beats/tree/master/winlogbeat) | Fetches and ships Windows Event logs

In addition to the above Beats, which are officially supported by
[Elastic](https://www.elastic.co), the community has created a set of other
Beats that make use of libbeat but live outside of this Github repository. We
maintain a list of community Beats
[here](https://www.elastic.co/guide/en/beats/libbeat/master/community-beats.html).

## Documentation and Getting Started

You can find the documentation and getting started guides for each of the Beats
on the [elastic.co site](https://www.elastic.co/guide/):

* [Beats platform](https://www.elastic.co/guide/en/beats/libbeat/current/index.html)
* [Filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/index.html)
* [Metricbeat](https://www.elastic.co/guide/en/beats/metricbeat/current/index.html)
* [Packetbeat](https://www.elastic.co/guide/en/beats/packetbeat/current/index.html)
* [Winlogbeat](https://www.elastic.co/guide/en/beats/winlogbeat/current/index.html)

## Getting Help

If you need help or hit an issue, please start by opening a topic on our
[discuss forums](https://discuss.elastic.co/c/beats). Please note that we
reserve GitHub tickets for confirmed bugs and enhancement requests.

## Downloads

You can download pre-compiled Beats binaries, as well as packages for the
supported platforms, from [this page](https://www.elastic.co/downloads/beats).

## Contributing

We'd love to work with you! You can help make the Beats better in many ways:
report issues, help us reproduce issues, fix bugs, add functionality, or even
create your own Beat.

Please start by reading our [CONTRIBUTING](CONTRIBUTING.md) file.

If you are creating a new Beat, you don't need to submit the code to this
repository. You can simply start working in a new repository and make use of
the libbeat packages, by following our [developer
guide](https://www.elastic.co/guide/en/beats/libbeat/master/new-beat.html).
After you have a working prototype, open a pull request to add your Beat to the
list of [community
Beats](https://github.com/elastic/beats/blob/master/libbeat/docs/communitybeats.asciidoc).

## Building Beats from the Source

See our [CONTRIBUTING](CONTRIBUTING.md) file for information about setting up
your dev environment to build Beats from the source.
115
vendor/github.com/elastic/beats/Vagrantfile
generated
vendored
Normal file
@ -0,0 +1,115 @@
### Documentation
# This is a Vagrantfile for Beats development.
#
# Boxes
# =====
#
# win2012
# -------
# This box is used as a Windows development and testing environment for Beats.
#
# Usage and Features:
# - Two users exist: Administrator and Vagrant. Both have the password: vagrant
# - Use 'vagrant ssh' to open a Windows command prompt.
# - Use 'vagrant rdp' to open a Windows Remote Desktop session. Mac users must
#   install the Microsoft Remote Desktop Client from the App Store.
# - There is a desktop shortcut labeled "Beats Shell" that opens a command prompt
#   to C:\Gopath\src\github.com\elastic\beats where the code is mounted.
#
# solaris
# -------------------
# - Use gmake instead of make.
#
# freebsd and openbsd
# -------------------
# - Use gmake instead of make.
# - Folder syncing doesn't work well. Consider copying the files into the box or
#   cloning the project inside the box.

# Provisioning for Windows PowerShell
$winPsProvision = <<SCRIPT
echo 'Creating github.com\elastic in the GOPATH'
New-Item -itemtype directory -path "C:\\Gopath\\src\\github.com\\elastic" -force
echo "Symlinking C:\\Vagrant to C:\\Gopath\\src\\github.com\\elastic"
cmd /c mklink /d C:\\Gopath\\src\\github.com\\elastic\\beats \\\\vboxsvr\\vagrant

echo "Creating Beats Shell desktop shortcut"
$WshShell = New-Object -comObject WScript.Shell
$Shortcut = $WshShell.CreateShortcut("$Home\\Desktop\\Beats Shell.lnk")
$Shortcut.TargetPath = "cmd.exe"
$Shortcut.Arguments = "/K cd /d C:\\Gopath\\src\\github.com\\elastic\\beats"
$Shortcut.Save()

echo "Disable automatic updates"
$AUSettings = (New-Object -com "Microsoft.Update.AutoUpdate").Settings
$AUSettings.NotificationLevel = 1
$AUSettings.Save()
SCRIPT

# Provisioning for Unix/Linux
$unixProvision = <<SCRIPT
echo 'Creating github.com/elastic in the GOPATH'
mkdir -p ~/go/src/github.com/elastic
echo 'Symlinking /vagrant to ~/go/src/github.com/elastic'
cd ~/go/src/github.com/elastic
if [ -d "/vagrant" ]; then ln -s /vagrant beats; fi
SCRIPT

Vagrant.configure(2) do |config|

  # Windows Server 2012 R2
  config.vm.define "win2012", primary: true do |win2012|

    win2012.vm.box = "https://s3.amazonaws.com/beats-files/vagrant/beats-win2012-r2-virtualbox-2016-10-28_1224.box"
    win2012.vm.guest = :windows

    # Communicator for windows boxes
    win2012.vm.communicator = "winrm"

    # Port forward WinRM and RDP
    win2012.vm.network :forwarded_port, guest: 22, host: 2222, id: "ssh", auto_correct: true
    win2012.vm.network :forwarded_port, guest: 3389, host: 33389, id: "rdp", auto_correct: true
    win2012.vm.network :forwarded_port, guest: 5985, host: 55985, id: "winrm", auto_correct: true

    win2012.vm.provision "shell", inline: $winPsProvision
  end

  # Solaris 11.2
  config.vm.define "solaris", primary: true do |solaris|
    solaris.vm.box = "https://s3.amazonaws.com/beats-files/vagrant/beats-solaris-11.2-virtualbox-2016-11-02_1603.box"
    solaris.vm.network :forwarded_port, guest: 22, host: 2223, id: "ssh", auto_correct: true

    solaris.vm.provision "shell", inline: $unixProvision, privileged: false
  end

  # FreeBSD 11.0
  config.vm.define "freebsd", primary: true do |freebsd|
    freebsd.vm.box = "https://s3.amazonaws.com/beats-files/vagrant/beats-freebsd-11.0-virtualbox-2016-11-02_1638.box"
    freebsd.vm.network :forwarded_port, guest: 22, host: 2224, id: "ssh", auto_correct: true

    # Must use NFS to sync a folder on FreeBSD and this requires a host-only network.
    # To enable the /vagrant folder, set disabled to false and uncomment the private_network.
    config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", :nfs => true, disabled: true
    #config.vm.network "private_network", ip: "192.168.135.18"

    freebsd.vm.provision "shell", inline: $unixProvision, privileged: false
  end

  # OpenBSD 5.9-stable
  config.vm.define "openbsd", primary: true do |openbsd|
    openbsd.vm.box = "https://s3.amazonaws.com/beats-files/vagrant/beats-openbsd-5.9-current-virtualbox-2016-11-02_2007.box"
    openbsd.vm.network :forwarded_port, guest: 22, host: 2225, id: "ssh", auto_correct: true

    config.vm.synced_folder ".", "/vagrant", type: "rsync", disabled: true
    config.vm.provider :virtualbox do |vbox|
      vbox.check_guest_additions = false
      vbox.functional_vboxsf = false
    end

    openbsd.vm.provision "shell", inline: $unixProvision, privileged: false
  end

end

# -*- mode: ruby -*-
# vi: set ft=ruby :
1
vendor/github.com/elastic/beats/codecov.yml
generated
vendored
Normal file
@ -0,0 +1 @@
comment: false
4
vendor/github.com/elastic/beats/dev-tools/.beatconfig
generated
vendored
Normal file
@ -0,0 +1,4 @@
packetbeat-/packetbeat-
filebeat-/filebeat-
winlogonbeat-/winlogonbeat-
logstash-/logstash-
51
vendor/github.com/elastic/beats/dev-tools/README.md
generated
vendored
Normal file
@ -0,0 +1,51 @@
Available scripts
-----------------

The following scripts are used by the unified release process:

| File                  | Description |
|-----------------------|-------------|
| get_version           | Returns the current version |
| set_version           | Sets the current version in all places where change is required. Doesn't commit changes. |
| deploy                | Builds all artifacts for the officially supported Beats |

Other scripts:

| File                  | Description |
|-----------------------|-------------|
| aggregate_coverage.py | Used to create coverage reports that contain both unit and system tests data |
| merge_pr              | Used to make it easier to open a PR that merges one branch into another. |

Import / export the dashboards of a single Beat:

| File                  | Description |
|-----------------------|-------------|
| import_dashboards.sh  | Bash script to import the Beat dashboards from a local directory in Elasticsearch |
| import_dashboards.ps1 | Powershell script to import the Beat dashboards from a local directory in Elasticsearch |
| export_dashboards.py  | Python script to export the Beat dashboards from Elasticsearch to a local directory |

Running export_dashboards.py in a virtualenv
--------------------------------------------

If you are running the python script for the first time, you need to create
the environment by running the following commands in the `beats/dev-tools`
directory:

```
virtualenv env
. env/bin/activate
pip install -r requirements.txt
```

This creates the environment that contains all the python packages required
to run the `export_dashboards.py` script. Thus, for the next runs you just
need to activate the environment:

```
. env/bin/activate
```
49
vendor/github.com/elastic/beats/dev-tools/aggregate_coverage.py
generated
vendored
Normal file
@ -0,0 +1,49 @@
#!/usr/bin/env python

"""Simple script to concatenate coverage reports.
"""

import os
import sys
import argparse
import fnmatch


def main(arguments):

    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('dir', help="Input dir to search recursively for .cov files")
    parser.add_argument('-o', '--outfile', help="Output file",
                        default=sys.stdout, type=argparse.FileType('w'))

    args = parser.parse_args(arguments)

    # Recursively find all matching .cov files.
    matches = []
    for root, dirnames, filenames in os.walk(args.dir):
        for filename in fnmatch.filter(filenames, '*.cov'):
            matches.append(os.path.join(root, filename))

    # Write to output.
    lines = {}
    args.outfile.write('mode: atomic\n')
    for m in matches:
        if os.path.abspath(args.outfile.name) != os.path.abspath(m):
            with open(m) as f:
                for line in f:
                    if not line.startswith('mode:') and "vendor" not in line:
                        (position, stmt, count) = line.split(" ")
                        stmt = int(stmt)
                        count = int(count)
                        prev_count = 0
                        if lines.has_key(position):
                            (_, prev_stmt, prev_count) = lines[position]
                            assert prev_stmt == stmt
                        lines[position] = (position, stmt, prev_count + count)

    for line in sorted(["%s %d %d\n" % lines[key] for key in lines.keys()]):
        args.outfile.write(line)


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
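For context, the `.cov` files that aggregate_coverage.py merges are Go cover profiles: each body line has the `position statements count` layout that the `line.split(" ")` call above relies on. An illustrative (made-up) input file would look like:

```
mode: atomic
github.com/elastic/beats/libbeat/beat/beat.go:42.2,44.16 3 1
github.com/elastic/beats/libbeat/beat/beat.go:47.2,47.14 1 0
```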
99
vendor/github.com/elastic/beats/dev-tools/cherrypick_pr
generated
vendored
Executable file
@ -0,0 +1,99 @@
#!/usr/bin/env python
import sys
import argparse
from subprocess import check_call, call, check_output

"""
Example usage:

./dev-tools/cherrypick_pr 5.0 2565 6490604aa0cf7fa61932a90700e6ca988fc8a527

In case of backporting errors, fix them, then run:

git cherry-pick --continue
./dev-tools/cherrypick_pr 5.0 2565 6490604aa0cf7fa61932a90700e6ca988fc8a527 --continue

This script does the following:

* cleans up both from_branch and to_branch (warning: drops local changes)
* creates a temporary branch named something like "backport_2565"
* calls the git cherry-pick command in this branch
* after fixing the merge errors (if needed), pushes the branch to your
  remote

You then just need to go to Github and open the PR.

Note that you need to take the commit hashes from `git log` on the
from_branch; copying the IDs from Github doesn't work in case we squashed the
PR.
"""


def main():
    parser = argparse.ArgumentParser(
        description="Creates a PR for merging two branches")
    parser.add_argument("to_branch",
                        help="To branch (e.g 5.0)")
    parser.add_argument("pr_number",
                        help="The PR number being merged (e.g. 2345)")
    parser.add_argument("commit_hashes", metavar="hash", nargs="+",
                        help="The commit hashes to cherry pick." +
                             " You can specify multiple.")
    parser.add_argument("--yes", action="store_true",
                        help="Assume yes. Warning: discards local changes.")
    parser.add_argument("--continue", action="store_true",
                        help="Continue after fixing merging errors.")
    parser.add_argument("--from_branch", default="master",
                        help="From branch")
    args = parser.parse_args()

    print args

    tmp_branch = "backport_{}".format(args.pr_number)

    if not vars(args)["continue"]:
        if not args.yes and raw_input("This will destroy all local changes. " +
                                      "Continue? [y/n]: ") != "y":
            return 1
        check_call("git reset --hard", shell=True)
        check_call("git clean -df", shell=True)
        check_call("git fetch", shell=True)

        check_call("git checkout {}".format(args.from_branch), shell=True)
        check_call("git pull", shell=True)

        check_call("git checkout {}".format(args.to_branch), shell=True)
        check_call("git pull", shell=True)

        call("git branch -D {} > /dev/null".format(tmp_branch), shell=True)
        check_call("git checkout -b {}".format(tmp_branch), shell=True)
        if call("git cherry-pick -x {}".format(" ".join(args.commit_hashes)),
                shell=True) != 0:
            print("Looks like you have cherry-pick errors.")
            print("Fix them, then run: ")
            print("    git cherry-pick --continue")
            print("    {} --continue".format(" ".join(sys.argv)))
            return 1

    if len(check_output("git status -s", shell=True).strip()) > 0:
        print("Looks like you have uncommitted changes." +
              " Please execute first: git cherry-pick --continue")
        return 1

    if len(check_output("git log HEAD...{}".format(args.to_branch),
                        shell=True).strip()) == 0:
        print("No commit to push")
        return 1

    print("Ready to push branch.")
    remote = raw_input("To which remote should I push? (your fork): ")
    call("git push {} :{} > /dev/null".format(remote, tmp_branch),
         shell=True)
    check_call("git push --set-upstream {} {}"
               .format(remote, tmp_branch), shell=True)
    print("Done. Open PR by following this URL: \n\t" +
          "https://github.com/elastic/beats/compare/{}...{}:{}?expand=1"
          .format(args.to_branch, remote, tmp_branch))


if __name__ == "__main__":
    sys.exit(main())
25
vendor/github.com/elastic/beats/dev-tools/deploy
generated
vendored
Executable file
@ -0,0 +1,25 @@
#!/usr/bin/env python
import os
import argparse
from subprocess import check_call


def main():
    parser = argparse.ArgumentParser(
        description="Builds all the Beats artifacts")
    parser.add_argument("--no-snapshot", action="store_true",
                        help="Don't append -SNAPSHOT to the version.")
    args = parser.parse_args()

    dir = os.path.dirname(os.path.realpath(__file__))
    os.chdir(dir + "/../")
    print("Getting dependencies")
    check_call("make clean", shell=True)
    print("Done building Docker images.")
    if args.no_snapshot:
        check_call("make SNAPSHOT=no package", shell=True)
    else:
        check_call("make SNAPSHOT=yes package", shell=True)
    print("All done")


if __name__ == "__main__":
    main()
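Example invocations, based only on the flag defined in the script above:

```
# build all artifacts with -SNAPSHOT appended to the version
./dev-tools/deploy

# build release artifacts without the -SNAPSHOT suffix
./dev-tools/deploy --no-snapshot
```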
116
vendor/github.com/elastic/beats/dev-tools/export_dashboards.py
generated
vendored
Normal file
@ -0,0 +1,116 @@
from elasticsearch import Elasticsearch
import argparse
import os
import json
import re


def ExportDashboards(es, regex, kibana_index, output_directory):
    res = es.search(
        index=kibana_index,
        doc_type="dashboard",
        size=1000)

    try:
        reg_exp = re.compile(regex, re.IGNORECASE)
    except:
        print("Wrong regex {}".format(regex))
        return

    for doc in res['hits']['hits']:

        if not reg_exp.match(doc["_source"]["title"]):
            print("Ignore dashboard", doc["_source"]["title"])
            continue

        # save dashboard
        SaveJson("dashboard", doc, output_directory)

        # save dependencies
        panels = json.loads(doc['_source']['panelsJSON'])
        for panel in panels:
            if panel["type"] == "visualization":
                ExportVisualization(
                    es,
                    panel["id"],
                    kibana_index,
                    output_directory)
            elif panel["type"] == "search":
                ExportSearch(
                    es,
                    panel["id"],
                    kibana_index,
                    output_directory)
            else:
                print("Unknown type {} in dashboard".format(panel["type"]))


def ExportVisualization(es, visualization, kibana_index, output_directory):
    doc = es.get(
        index=kibana_index,
        doc_type="visualization",
        id=visualization)

    # save visualization
    SaveJson("visualization", doc, output_directory)

    # save dependencies
    if "savedSearchId" in doc["_source"]:
        search = doc["_source"]['savedSearchId']
        ExportSearch(
            es,
            search,
            kibana_index,
            output_directory)


def ExportSearch(es, search, kibana_index, output_directory):
    doc = es.get(
        index=kibana_index,
        doc_type="search",
        id=search)

    # save search
    SaveJson("search", doc, output_directory)


def SaveJson(doc_type, doc, output_directory):

    dir = os.path.join(output_directory, doc_type)
    if not os.path.exists(dir):
        os.makedirs(dir)
    # replace unsupported characters
    filepath = os.path.join(dir, re.sub(r'[\>\<:"/\\\|\?\*]', '', doc['_id']) + '.json')
    with open(filepath, 'w') as f:
        json.dump(doc['_source'], f, indent=2)
    print("Written {}".format(filepath))


def main():
    parser = argparse.ArgumentParser(
        description="Export the Kibana dashboards together with"
                    " all used visualizations, searches and index pattern")
    parser.add_argument("--url",
                        help="Elasticsearch URL. By default: http://localhost:9200",
                        default="http://localhost:9200")
    parser.add_argument("--regex",
                        help="Regular expression to match all the dashboards to be exported. For example: metricbeat*",
                        required=True)
    parser.add_argument("--kibana",
                        help="Elasticsearch index where to store the Kibana settings. By default: .kibana ",
                        default=".kibana")
    parser.add_argument("--dir", help="Output directory. By default: output",
                        default="output")

    args = parser.parse_args()

    print("Export {} dashboards to {} directory".format(args.regex, args.dir))
    print("Elasticsearch URL: {}".format(args.url))
    print("Elasticsearch index to store Kibana's"
          " dashboards: {}".format(args.kibana))

    es = Elasticsearch(args.url)
    ExportDashboards(es, args.regex, args.kibana, args.dir)

if __name__ == "__main__":
    main()
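A sample run, using only the flags defined in main() above (the values are illustrative; note that the regex is matched with `re.match`, so it anchors at the start of the dashboard title):

```
python dev-tools/export_dashboards.py --regex 'metricbeat' \
    --url http://localhost:9200 --kibana .kibana --dir output
```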
20
vendor/github.com/elastic/beats/dev-tools/get_version
generated
vendored
Executable file
@ -0,0 +1,20 @@
#!/usr/bin/env python
import os
import argparse

pattern = '''const defaultBeatVersion = "'''


def main():
    parser = argparse.ArgumentParser(
        description="Prints the current version at stdout.")
    parser.parse_args()

    dir = os.path.dirname(os.path.realpath(__file__))
    with open(dir + "/../libbeat/beat/version.go", "r") as f:
        for line in f:
            if line.startswith(pattern):
                print(line[len(pattern):-2])  # -2 for \n and the final quote

if __name__ == "__main__":
    main()
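get_version works by scanning `libbeat/beat/version.go` for a line that starts with the `defaultBeatVersion` constant declaration matched by `pattern` above. The line it expects looks like the following (the version value here is illustrative, not taken from the repo):

```
const defaultBeatVersion = "5.1.1"
```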
62
vendor/github.com/elastic/beats/dev-tools/merge_pr
generated
vendored
Executable file
@ -0,0 +1,62 @@
#!/usr/bin/env python
import sys
import argparse
from subprocess import check_call, call, check_output


def main():
    parser = argparse.ArgumentParser(
        description="Creates a PR for merging two branches")
    parser.add_argument("from_branch",
                        help="From branch (e.g 1.1)")
    parser.add_argument("to_branch",
                        help="To branch (e.g master)")
    parser.add_argument("--yes", action="store_true",
                        help="Assume yes. Warning: discards local changes.")
    parser.add_argument("--continue", action="store_true",
                        help="Continue after fixing merging errors.")
    args = parser.parse_args()

    tmp_branch = "automatic_merge_from_{}_to_{}_branch".format(
        args.from_branch, args.to_branch)

    if not vars(args)["continue"]:
        if not args.yes and raw_input("This will destroy all local changes. " +
                                      "Continue? [y/n]: ") != "y":
            return 1
        check_call("git reset --hard", shell=True)
        check_call("git clean -dfx", shell=True)
        check_call("git fetch", shell=True)

        check_call("git checkout {}".format(args.from_branch), shell=True)
        check_call("git pull", shell=True)

        check_call("git checkout {}".format(args.to_branch), shell=True)
        check_call("git pull", shell=True)
        call("git branch -D {} > /dev/null".format(tmp_branch), shell=True)
        check_call("git checkout -b {}".format(tmp_branch), shell=True)
        if call("git merge {}".format(args.from_branch), shell=True) != 0:
            print("Looks like you have merge errors.")
            print("Fix them, commit, then run: {} --continue"
                  .format(" ".join(sys.argv)))
            return 1

    if len(check_output("git status -s", shell=True).strip()) > 0:
        print("Looks like you have uncommitted changes")
        return 1

    if len(check_output("git log HEAD...{}".format(args.to_branch),
                        shell=True).strip()) == 0:
        print("No commit to push")
        return 1

    print("Ready to push branch.")
    remote = raw_input("To which remote should I push? (your fork): ")
    call("git push {} :{} > /dev/null".format(remote, tmp_branch),
         shell=True)
    check_call("git push --set-upstream {} {}"
               .format(remote, tmp_branch), shell=True)
    print("Done. Go to Github and open the PR")


if __name__ == "__main__":
    sys.exit(main())
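Example, using the branch names from the argparse help strings above:

```
# open a PR that merges branch 1.1 into master
./dev-tools/merge_pr 1.1 master

# after resolving merge conflicts and committing
./dev-tools/merge_pr 1.1 master --continue
```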
7
vendor/github.com/elastic/beats/dev-tools/packer/.gitignore
generated
vendored
Normal file
@ -0,0 +1,7 @@
*.swp
*.swo
/build/
/env/

# copied over from xgo-image/
docker/xgo-image-deb6/base/build.sh
vendor/github.com/elastic/beats/dev-tools/packer/Makefile
generated
vendored
Normal file
123
vendor/github.com/elastic/beats/dev-tools/packer/Makefile
generated
vendored
Normal file
@ -0,0 +1,123 @@
|
||||
BUILDID?=$(shell git rev-parse HEAD)
|
||||
SNAPSHOT?=yes
|
||||
|
||||
makefile_abspath:=$(abspath $(lastword $(MAKEFILE_LIST)))
|
||||
packer_absdir=$(shell dirname ${makefile_abspath})
|
||||
beat_abspath=${GOPATH}/src/${BEAT_DIR}
|
||||
|
||||
|
||||
%/deb: ${BUILD_DIR}/god-linux-386 ${BUILD_DIR}/god-linux-amd64 fpm-image
|
||||
echo Creating DEB packages for $(@D)
|
||||
ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_DIR=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/debian/build.sh
|
||||
ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_DIR=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/debian/build.sh
|
||||
|
||||
%/rpm: ${BUILD_DIR}/god-linux-386 ${BUILD_DIR}/god-linux-amd64 fpm-image
|
||||
echo Creating RPM packages for $(@D)
|
||||
ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_DIR=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/centos/build.sh
|
||||
ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_DIR=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/centos/build.sh
|
||||
|
||||
%/darwin:
|
||||
echo Creating Darwin packages for $(@D)
|
||||
ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_DIR=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/darwin/build.sh
|
||||
|
||||
%/win:
|
||||
echo Creating Darwin packages for $(@D)
|
||||
ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_DIR=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/windows/build.sh
|
||||
ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_DIR=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/windows/build.sh
|
||||
|
||||
%/bin:
|
||||
echo Creating Linux packages for $(@D)
|
||||
ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_DIR=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/binary/build.sh
|
||||
ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_DIR=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/binary/build.sh
|
||||
|
||||
.PHONY: package-dashboards
|
||||
package-dashboards:
|
||||
echo Creating the Dashboards package
|
||||
BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/dashboards/build.sh
|
||||
|
||||
.PHONY: deps
|
||||
deps:
|
||||
go get -u github.com/tsg/gotpl
|
||||
|
||||
.PHONY: xgo-image
|
||||
xgo-image:
|
||||
cd $(packer_absdir)/docker/xgo-image/; ./build.sh
|
||||
# copy build.sh script in the xgo-image-deb6 to avoid code duplication
|
||||
cp $(packer_absdir)/docker/xgo-image/base/build.sh $(packer_absdir)/docker/xgo-image-deb6/base/build.sh
|
||||
cd $(packer_absdir)/docker/xgo-image-deb6/; ./build.sh
|
||||
|
||||
.PHONY: fpm-image
|
||||
fpm-image:
|
||||
docker build --rm=true -t tudorg/fpm $(packer_absdir)/docker/fpm-image
|
||||
|
||||
.PHONY: go-daemon-image
|
||||
go-daemon-image:
|
||||
docker build --rm=true -t tudorg/go-daemon $(packer_absdir)/docker/go-daemon/
|
||||
|
||||
${BUILD_DIR}/god-linux-386 ${BUILD_DIR}/god-linux-amd64:
|
||||
docker run --rm -v ${BUILD_DIR}:/build tudorg/go-daemon
|
||||
|
||||
${BUILD_DIR}/upload:
|
||||
mkdir -p ${BUILD_DIR}/upload
|
||||
|
||||
${BUILD_DIR}/upload/build_id.txt:
|
||||
echo $(BUILDID) > ${BUILD_DIR}/upload/build_id.txt
|
||||
|
||||
# Build the image required for package-upload.
|
||||
.PHONY: deb-rpm-s3
|
||||
deb-rpm-s3:
|
||||
$(packer_absdir)/docker/deb-rpm-s3/build.sh
|
||||
|
||||
.PHONY: run-interactive-builder-deb6
|
||||
run-interactive-builder-deb6:
|
||||
docker run -t -i -v $(shell pwd)/build:/build \
|
||||
-v $(shell pwd)/xgo-scripts/:/scripts \
|
||||
-v $(shell pwd)/../..:/source \
|
||||
--entrypoint=bash tudorg/beats-builder-deb6
|
||||
|
||||
.PHONY: run-interactive-builder
|
||||
run-interactive-builder:
|
||||
docker run -t -i -v $(shell pwd)/build:/build \
|
||||
-v $(packer_absdir)/xgo-scripts/:/scripts \
|
||||
-v $(shell pwd)/../..:/source \
|
||||
--entrypoint=bash tudorg/beats-builder
|
||||
|
||||
.PHONY: images
|
||||
images: xgo-image fpm-image go-daemon-image
|
||||
|
||||
.PHONY: push-images
|
||||
push-images:
|
||||
docker push tudorg/beats-builder
|
||||
docker push tudorg/beats-builder-deb6
|
||||
docker push tudorg/fpm
|
||||
docker push tudorg/go-daemon
|
||||
|
||||
.PHONY: pull-images
|
||||
pull-images:
|
||||
docker pull tudorg/beats-builder
|
||||
docker pull tudorg/beats-builder-deb6
|
||||
docker pull tudorg/fpm
|
||||
docker pull tudorg/go-daemon
|
||||
|
||||
|
||||
define rm-image =
|
||||
@echo "Cleaning $(1) image..."
|
||||
@if [ $(shell docker ps -n 1 -a -q --filter="image=$(1)" ) ]; then \
|
||||
docker stop $(shell docker ps -a -q --filter="image=$(1)"); \
|
||||
docker rm $(shell docker ps -a -q --filter="image=$(1)"); \
|
||||
fi; \
|
||||
\
|
||||
if [ $(shell docker images -q $(1)) ]; then \
|
||||
docker rmi $(1); \
|
||||
fi
|
||||
endef
|
||||
|
||||
|
||||
.PHONY: clean-images
|
||||
clean-images:
|
||||
@$(call rm-image,tudorg/beats-builder-deb6)
|
||||
@$(call rm-image,tudorg/beats-builder)
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
$(call rm-image,build-image)
|
96
vendor/github.com/elastic/beats/dev-tools/packer/README.md
generated
vendored
Normal file
96
vendor/github.com/elastic/beats/dev-tools/packer/README.md
generated
vendored
Normal file
@ -0,0 +1,96 @@
|
||||
[](https://travis-ci.org/elastic/beats-packer)
|
||||
|
||||
# Beats Packer
|
||||
|
||||
Tools, scripts and docker images for cross-compiling and packaging the Elastic
|
||||
[Beats](https://www.elastic.co/products/beats).
|
||||
|
||||
## Prepare
|
||||
|
||||
You need Go and docker installed. This project uses several docker files, you
|
||||
can either build them with:
|
||||
|
||||
make images
|
||||
|
||||
Or pull them from the Docker registry with:
|
||||
|
||||
make pull-images
|
||||
|
||||
Prepare the rest with:
|
||||
|
||||
make deps
|
||||
|
||||
## Cross-compile
|
||||
|
||||
The cross compilation part is based on [xgo](https://github.com/karalabe/xgo),
|
||||
with some [changes](https://github.com/tsg/xgo) that add a bit more
|
||||
extensibility that we needed for the Beats (e.g. static compiling, custom
|
||||
docker image).
|
||||
|
||||
You can cross-compile one Beat for all platforms with (e.g.):
|
||||
|
||||
make packetbeat
|
||||
|
||||
## Packaging
|
||||
|
||||
For each OS (named platform here) we execute a `build.sh` script which is
|
||||
free to do whatever it is required to build the proper packages for that
|
||||
platform. This can include running docker containers with the right tools
|
||||
included or with that OS installed for native packaging.
|
||||
|
||||
The deb and rpm creation is based on [fpm](https://github.com/jordansissel/fpm)
|
||||
which is executed from a container.
|
||||
|
||||
Besides the platform, there are three other dimensions: architecture,
|
||||
beat and the release. Each of these is defined by YAML files in their folders.
|
||||
These dimensions only set static options, the platforms is the only one
|
||||
scripted.
|
||||
|
||||
The runner is currently (ab)using a Makefile, which is nice because it can
|
||||
parallelize things automatically, but it's hacky so we might replace it in
|
||||
a future.
|
||||
|
||||
Building all Beats for all platforms:
|
||||
|
||||
make clean && make
|
||||
|
||||
## Naming conventions
|
||||
|
||||
We use a set of package name conventions across all the Elastic stack:
|
||||
|
||||
* The general form is `name-version-os-arch.ext`. Note that this means we
|
||||
use dashes even for Deb files.
|
||||
* The archs are called `x86` and `x64` except for deb/rpm where we keep the
|
||||
OS preferred names (i386/amd64, i686/x86_64).
|
||||
* For version strings like `5.0.0-alpha3` we use dashes in all filenames. The
|
||||
only exception is the RPM metadata (not the filename) where we replace the
|
||||
dash with an underscore (`5.0.0_alpha3`).
|
||||
* We omit the release number from the filenames. It's always `1` in the metadata.
|
||||
|
||||
For example, here are the artifacts created for Filebeat:
|
||||
|
||||
```
|
||||
filebeat-5.0.0-amd64.deb
|
||||
filebeat-5.0.0-darwin-x86_64.tar.gz
|
||||
filebeat-5.0.0-i386.deb
|
||||
filebeat-5.0.0-i686.rpm
|
||||
filebeat-5.0.0-linux-x86.tar.gz
|
||||
filebeat-5.0.0-linux-x86_64.tar.gz
|
||||
filebeat-5.0.0-windows-x86.zip
|
||||
filebeat-5.0.0-windows-x86_64.zip
|
||||
filebeat-5.0.0-x86_64.rpm
|
||||
```
|
||||
|
||||
And the SNAPSHOT versions:
|
||||
|
||||
```
|
||||
filebeat-5.0.0-SNAPSHOT-amd64.deb
|
||||
filebeat-5.0.0-SNAPSHOT-darwin-x86_64.tar.gz
|
||||
filebeat-5.0.0-SNAPSHOT-i386.deb
|
||||
filebeat-5.0.0-SNAPSHOT-i686.rpm
|
||||
filebeat-5.0.0-SNAPSHOT-linux-x86.tar.gz
|
||||
filebeat-5.0.0-SNAPSHOT-linux-x86_64.tar.gz
|
||||
filebeat-5.0.0-SNAPSHOT-windows-x86.zip
|
||||
filebeat-5.0.0-SNAPSHOT-windows-x86_64.zip
|
||||
filebeat-5.0.0-SNAPSHOT-x86_64.rpm
|
||||
```
|
5
vendor/github.com/elastic/beats/dev-tools/packer/archs/386.yml
generated
vendored
Normal file
@ -0,0 +1,5 @@
arch: '386'
deb_arch: i386
rpm_arch: i686
bin_arch: x86
win_arch: x86
5
vendor/github.com/elastic/beats/dev-tools/packer/archs/amd64.yml
generated
vendored
Normal file
@ -0,0 +1,5 @@
arch: amd64
deb_arch: amd64
rpm_arch: x86_64
bin_arch: x86_64
win_arch: x86_64

1
vendor/github.com/elastic/beats/dev-tools/packer/docker/deb-rpm-s3/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
elasticsearch.asc

38
vendor/github.com/elastic/beats/dev-tools/packer/docker/deb-rpm-s3/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,38 @@
# Dockerfile for building an image that contains all of the necessary
# dependencies for signing deb/rpm packages and publishing APT and YUM
# repositories to Amazon S3.
FROM debian:jessie

RUN apt-get update
RUN apt-get install -y git \
    rubygems ruby-dev patch gcc make zlib1g-dev rpm curl dpkg-sig \
    yum python-deltarpm \
    expect

# Install python-boto from source to get the latest version.
RUN git clone git://github.com/boto/boto.git && \
    cd boto && \
    git checkout 2.38.0 && \
    python setup.py install

# Install deb-s3
RUN gem install deb-s3

# Install rpm-s3
# WARNING: Pulling from master, may not be repeatable.
RUN cd /usr/local && \
    git clone https://github.com/crohr/rpm-s3 --recursive && \
    echo '[s3]\ncalling_format = boto.s3.connection.OrdinaryCallingFormat' > /etc/boto.cfg
# Use HTTP for debugging traffic to S3.
#echo '[Boto]\nis_secure = False' >> /etc/boto.cfg
ENV PATH /usr/local/rpm-s3/bin:$PATH
ADD rpmmacros /root/.rpmmacros

# Add the scripts that are executed within the container.
ADD *.expect /
ADD publish-package-repositories.sh /

# Execute the publish-package-repositories.sh when the container
# is run.
ENTRYPOINT ["/publish-package-repositories.sh"]
CMD ["--help"]

10
vendor/github.com/elastic/beats/dev-tools/packer/docker/deb-rpm-s3/build.sh
generated
vendored
Executable file
@ -0,0 +1,10 @@
#!/bin/bash
set -e

#
# Build script for the deb-rpm-s3 docker container.
#

cd "$(dirname "$0")"

docker build -t deb-rpm-s3 .

45
vendor/github.com/elastic/beats/dev-tools/packer/docker/deb-rpm-s3/deb-rpm-s3.sh
generated
vendored
Executable file
@ -0,0 +1,45 @@
#!/bin/bash

#
# Wrapper script for starting the docker container.
#
# You must set AWS_ACCESS_KEY and AWS_SECRET_KEY in your environment prior to
# running. You can optionally pass the GPG key's passphrase as the environment
# variable PASS.
#

cd "$(dirname "$0")"

if [ ! -e "elasticsearch.asc" ]; then
    cat << EOF
You must place a copy of the Elasticsearch GPG signing key (named
elasticsearch.asc) into

$PWD

prior to building this docker image.

EOF
    exit 1
fi

bucket="packages.elasticsearch.org"
prefix="beats"
dir="/beats-packer/build/upload"
gpg_key="/beats-packer/dev-tools/packer/docker/deb-rpm-s3/elasticsearch.asc"
origin=Elastic

docker run -it --rm \
    --env="PASS=$PASS" \
    --volume `pwd`/../../../..:/beats-packer \
    deb-rpm-s3 \
    --bucket=$bucket \
    --prefix=$prefix \
    --directory="$dir" \
    --aws-access-key="$AWS_ACCESS_KEY" \
    --aws-secret-key="$AWS_SECRET_KEY" \
    --gpg-key="$gpg_key" \
    --origin="$origin" \
    --verbose \
    "$@"

19
vendor/github.com/elastic/beats/dev-tools/packer/docker/deb-rpm-s3/deb-s3.expect
generated
vendored
Normal file
@ -0,0 +1,19 @@
#!/usr/bin/expect -f

# Expect wrapper for deb-s3 that provides the GPG signing password
# when prompted.

spawn deb-s3 upload \
    --sign \
    --preserve_versions \
    --bucket "$env(BUCKET)" \
    --prefix "$env(PREFIX)/apt" \
    --arch $env(arch) \
    -o "$env(ORIGIN)" \
    {*}$argv
expect {
    "Enter passphrase: " {
        send -- "$env(PASS)\r"
        exp_continue
    }
}

20
vendor/github.com/elastic/beats/dev-tools/packer/docker/deb-rpm-s3/debsign.expect
generated
vendored
Executable file
@ -0,0 +1,20 @@
#!/usr/bin/expect -f

# Expect wrapper for 'dpkg-sig --sign' that provides the GPG signing password
# when prompted.
#
# Set password in PASS environment variable prior to running
# this expect script.
#
# Example usage:
# expect debsign.expect example.deb
#
# expect debsign.expect example.deb other.deb

spawn dpkg-sig --sign builder {*}$argv
expect {
    "Enter passphrase: " {
        send -- "$env(PASS)\r"
        exp_continue
    }
}

226
vendor/github.com/elastic/beats/dev-tools/packer/docker/deb-rpm-s3/publish-package-repositories.sh
generated
vendored
Executable file
@ -0,0 +1,226 @@
#!/bin/bash
set -e

# Script directory:
SDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

usage() {
    cat << EOF
Usage: $(basename $0) [-vh] [-d=directory] [-b=bucket] [-p=prefix]
    [--aws-access-key=aws id] [--aws-secret-key=aws secret]

Description: Sign packages and publish them to APT and YUM repositories
    hosted from an S3 bucket. When publishing, the repository metadata is
    also signed to prevent tampering.

    You will be prompted once for the GPG signing key's password. If the
    PASS environment variable is set then that value will be used and you
    will not be prompted.

Options:
    --aws-access-key=AWS_ACCESS_KEY  Required. AWS access key. Alternatively,
                                     AWS_ACCESS_KEY may be set as an environment
                                     variable.

    --aws-secret-key=AWS_SECRET_KEY  Required. AWS secret key. Alternatively,
                                     AWS_SECRET_KEY may be set as an environment
                                     variable.

    -b=BUCKET | --bucket=BUCKET      Required. The S3 bucket in which to publish.

    -p=PREFIX | --prefix=PREFIX      Required. Path to prefix to all published
                                     repositories.

    -d=DIR | --directory=DIR         Required. Directory to recursively search
                                     for .rpm and .deb files.

    -g=GPG_KEY | --gpg-key=GPG_KEY   Optional. Path to GPG key file to import.

    -o=ORIGIN | --origin=ORIGIN      Optional. Origin to use in APT repo metadata.

    -v | --verbose                   Optional. Enable verbose logging to stderr.

    -h | --help                      Optional. Print this usage information.
EOF
}

# Write a debug message to stderr.
debug()
{
    if [ "$VERBOSE" == "true" ]; then
        echo "DEBUG: $1" >&2
    fi
}

# Write an error message to stderr.
err()
{
    echo "ERROR: $1" >&2
}

# Parse command line arguments.
parseArgs() {
    for i in "$@"
    do
        case $i in
            --aws-access-key=*)
                AWS_ACCESS_KEY="${i#*=}"
                shift
                ;;
            --aws-secret-key=*)
                AWS_SECRET_KEY="${i#*=}"
                shift
                ;;
            -b=*|--bucket=*)
                BUCKET="${i#*=}"
                shift
                ;;
            -d=*|--directory=*)
                DIRECTORY="${i#*=}"
                shift
                ;;
            -g=*|--gpg-key=*)
                GPG_KEY="${i#*=}"
                shift
                ;;
            -h|--help)
                usage
                exit 1
                ;;
            -o=*|--origin=*)
                ORIGIN="${i#*=}"
                shift
                ;;
            -p=*|--prefix=*)
                PREFIX="${i#*=}"
                shift
                ;;
            -v|--verbose)
                VERBOSE=true
                shift
                ;;
            *)
                echo "Invalid argument: $i"
                usage
                exit 1
                ;;
        esac
    done

    if [ -z "$BUCKET" ]; then
        err "-b=BUCKET or --bucket=BUCKET is required."
        exit 1
    fi

    if [ -z "$DIRECTORY" ]; then
        err "-d=DIRECTORY or --directory=DIRECTORY is required."
        exit 1
    fi

    if [ ! -e "$DIRECTORY" ]; then
        err "Directory $DIRECTORY does not exist."
        exit 1
    fi

    if [ -z "$PREFIX" ]; then
        err "-p=PREFIX or --prefix=PREFIX is required."
        exit 1
    fi

    if [ -z "$AWS_ACCESS_KEY" ]; then
        err "--aws-access-key=AWS_ACCESS_KEY is required."
        exit 1
    fi

    if [ -z "$AWS_SECRET_KEY" ]; then
        err "--aws-secret-key=AWS_SECRET_KEY is required."
        exit 1
    fi

    export BUCKET
    export ORIGIN
    export PREFIX
    export AWS_ACCESS_KEY
    export AWS_SECRET_KEY
}

importGpg() {
    if [ ! -z "$GPG_KEY" ]; then
        if [ ! -f "$GPG_KEY" ]; then
            err "GPG key file $GPG_KEY does not exist."
            exit 1
        fi

        debug "Importing GPG key $GPG_KEY"
        gpg --import --allow-secret-key-import "$GPG_KEY" || true  # tolerate an already-imported key
    else
        debug "Not importing a GPG key because --gpg-key not specified."
    fi
}

getPassword() {
    if [ -z "$PASS" ]; then
        echo -n "Enter GPG pass phrase: "
        read -s PASS
    fi

    export PASS
}

signDebianPackages() {
    debug "Entering signDebianPackages"
    find $DIRECTORY -name '*.deb' | xargs expect $SDIR/debsign.expect
    debug "Exiting signDebianPackages"
}

signRpmPackages() {
    debug "Entering signRpmPackages"
    find $DIRECTORY -name '*.rpm' | xargs expect $SDIR/rpmsign.expect
    debug "Exiting signRpmPackages"
}

publishToAptRepo() {
    debug "Entering publishToAptRepo"

    # Verify the repository and credentials before continuing.
    deb-s3 verify --bucket "$BUCKET" --prefix "${PREFIX}/apt"

    for arch in i386 amd64
    do
        debug "Publishing $arch .deb packages..."
        export arch

        for deb in $(find "$DIRECTORY" -name "*${arch}.deb")
        do
            expect $SDIR/deb-s3.expect "$deb"
        done
    done
}

publishToYumRepo() {
    debug "Entering publishToYumRepo"

    for arch in i686 x86_64
    do
        debug "Publishing $arch .rpm packages..."
        export arch

        for rpm in $(find "$DIRECTORY" -name "*${arch}.rpm")
        do
            expect $SDIR/rpm-s3.expect "$rpm"
        done
    done
}

main() {
    parseArgs $*
    importGpg
    getPassword
    signDebianPackages
    signRpmPackages
    publishToAptRepo
    publishToYumRepo
    debug "Success"
}

main $*

20
vendor/github.com/elastic/beats/dev-tools/packer/docker/deb-rpm-s3/rpm-s3.expect
generated
vendored
Normal file
@ -0,0 +1,20 @@
#!/usr/bin/expect -f

# Expect wrapper for rpm-s3 that provides the GPG signing password
# when prompted.

spawn rpm-s3 \
    -vv \
    --sign \
    --region=external-1 \
    --keep=500 \
    --visibility=public-read \
    --bucket=$env(BUCKET) \
    --repopath=$env(PREFIX)/yum/el/$env(arch) \
    {*}$argv
expect {
    "Enter passphrase: " {
        send -- "$env(PASS)\r"
        exp_continue
    }
}

2
vendor/github.com/elastic/beats/dev-tools/packer/docker/deb-rpm-s3/rpmmacros
generated
vendored
Normal file
@ -0,0 +1,2 @@
%_signature gpg
%_gpg_name Elasticsearch (Elasticsearch Signing Key) <dev_ops@elasticsearch.org>

20
vendor/github.com/elastic/beats/dev-tools/packer/docker/deb-rpm-s3/rpmsign.expect
generated
vendored
Executable file
@ -0,0 +1,20 @@
#!/usr/bin/expect -f

# Expect wrapper for 'rpm --resign' that provides the GPG signing password
# when prompted.
#
# Set password in PASS environment variable prior to running
# this expect script.
#
# Example usage:
# expect rpmsign.expect example.rpm
#
# expect rpmsign.expect example.rpm other.rpm

spawn rpm --resign \
    --define "_signature gpg" \
    --define "_gpg_name Elasticsearch (Elasticsearch Signing Key) <dev_ops@elasticsearch.org>" \
    {*}$argv
expect -exact "Enter pass phrase: "
send -- "$env(PASS)\r"
expect eof

11
vendor/github.com/elastic/beats/dev-tools/packer/docker/fpm-image/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,11 @@
FROM ubuntu:16.04

MAINTAINER Tudor Golubenco <tudor@elastic.co>

# install fpm
RUN \
    apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential ruby-dev rpm zip dos2unix libgmp3-dev

RUN gem install fpm

10
vendor/github.com/elastic/beats/dev-tools/packer/docker/go-daemon/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,10 @@
FROM tudorg/xgo-base

MAINTAINER Tudor Golubenco <tudor@elastic.co>

# Inject the build script
ADD build_go_daemon.sh /build_go_daemon.sh
ENV BUILD_GO_DAEMON /build_go_daemon.sh
RUN chmod +x $BUILD_GO_DAEMON

ENTRYPOINT ["/build_go_daemon.sh"]

17
vendor/github.com/elastic/beats/dev-tools/packer/docker/go-daemon/build_go_daemon.sh
generated
vendored
Normal file
@ -0,0 +1,17 @@
#!/bin/bash

set -e

echo "Fetching go-daemon"
git clone https://github.com/tsg/go-daemon.git

cd /go-daemon

echo "Compiling for linux/amd64.."
gcc god.c -m64 -o god-linux-amd64 -lpthread -static

echo "Compiling for linux/i386.."
gcc god.c -m32 -o god-linux-386 -lpthread -static

echo "Copying to host.."
cp god-linux-amd64 god-linux-386 /build/

52
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/base/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,52 @@
# Go cross compiler (xgo): Base cross-compilation layer
# Copyright (c) 2014 Péter Szilágyi. All rights reserved.
#
# Released under the MIT license.

FROM debian:6

MAINTAINER Tudor Golubenco <tudor@elastic.co>

# Use sources list from the archive
ADD sources.list /etc/apt/sources.list

# Configure the Go environment, since it's not going to change
ENV PATH /usr/local/go/bin:$PATH
ENV GOPATH /go


# Inject the remote file fetcher and checksum verifier
ADD fetch.sh /fetch.sh
ENV FETCH /fetch.sh
RUN chmod +x $FETCH


# Make sure apt-get is up to date and dependent packages are installed
RUN \
    apt-get -o Acquire::Check-Valid-Until=false update && \
    apt-get install -y automake autogen build-essential ca-certificates \
        gcc-multilib \
        clang llvm-dev libtool libxml2-dev uuid-dev libssl-dev pkg-config \
        patch make xz-utils cpio wget unzip git mercurial bzr rsync --no-install-recommends

# Inject the Go package downloader and tool-chain bootstrapper
ADD bootstrap.sh /bootstrap.sh
ENV BOOTSTRAP /bootstrap.sh
RUN chmod +x $BOOTSTRAP

# Inject the new Go root distribution downloader and secondary bootstrapper
ADD bootstrap_pure.sh /bootstrap_pure.sh
ENV BOOTSTRAP_PURE /bootstrap_pure.sh
RUN chmod +x $BOOTSTRAP_PURE

# Inject the C dependency cross compiler
ADD build_deps.sh /build_deps.sh
ENV BUILD_DEPS /build_deps.sh
RUN chmod +x $BUILD_DEPS

# Inject the container entry point, the build script
ADD build.sh /build.sh
ENV BUILD /build.sh
RUN chmod +x $BUILD

ENTRYPOINT ["/build.sh"]

31
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/base/bootstrap.sh
generated
vendored
Normal file
@ -0,0 +1,31 @@
#!/bin/bash
#
# Contains the Go tool-chain bootstrapper, that retrieves all the configured
# distribution packages, extracts the binaries and deletes anything not needed.
#
# Usage: bootstrap.sh
#
# Needed environment variables:
#   FETCH - Remote file fetcher and checksum verifier (injected by image)
#   DIST_LINUX_64, DIST_LINUX_64_SHA1   - 64 bit Linux Go binaries and checksum
#   DIST_LINUX_32, DIST_LINUX_32_SHA1   - 32 bit Linux Go binaries and checksum
#   DIST_LINUX_ARM, DIST_LINUX_ARM_SHA1 - ARM v5 Linux Go binaries and checksum
#   DIST_OSX_64, DIST_OSX_64_SHA1       - 64 bit Mac OSX Go binaries and checksum
#   DIST_OSX_32, DIST_OSX_32_SHA1       - 32 bit Mac OSX Go binaries and checksum
#   DIST_WIN_64, DIST_WIN_64_SHA1       - 64 bit Windows Go binaries and checksum
#   DIST_WIN_32, DIST_WIN_32_SHA1       - 32 bit Windows Go binaries and checksum
set -e

# Download and verify all the binary packages
$FETCH $DIST_LINUX_64 $DIST_LINUX_64_SHA1
$FETCH $DIST_LINUX_32 $DIST_LINUX_32_SHA1

# Extract the 64 bit Linux package as the primary Go SDK
tar -C /usr/local -xzf `basename $DIST_LINUX_64`

# Extract all other packages as secondary ones, keeping only the binaries
tar -C /usr/local --wildcards -xzf `basename $DIST_LINUX_32` go/pkg/linux_386*
GOOS=linux GOARCH=386 /usr/local/go/pkg/tool/linux_amd64/dist bootstrap

# Delete all the intermediate downloaded files
rm -f `basename $DIST_LINUX_64` `basename $DIST_LINUX_32`

26
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/base/bootstrap_pure.sh
generated
vendored
Normal file
@ -0,0 +1,26 @@
#!/bin/bash
#
# Contains the Go tool-chain pure-Go bootstrapper, that as of Go 1.5, initiates
# not only a few pre-built Go cross compilers, but rather bootstraps all of the
# supported platforms from the origin Linux amd64 distribution.
#
# Usage: bootstrap.sh
#
# Needed environment variables:
#   FETCH          - Remote file fetcher and checksum verifier (injected by image)
#   ROOT_DIST      - 64 bit Linux Go binary distribution package
#   ROOT_DIST_SHA1 - 64 bit Linux Go distribution package checksum
set -e

# Download, verify and install the root distribution
$FETCH $ROOT_DIST $ROOT_DIST_SHA1

tar -C /usr/local -xzf `basename $ROOT_DIST`
rm -f `basename $ROOT_DIST`

export GOROOT=/usr/local/go
export GOROOT_BOOTSTRAP=$GOROOT

# Pre-build all guest distributions based on the root distribution
echo "Bootstrapping linux/386..."
GOOS=linux GOARCH=386 CGO_ENABLED=1 go install std

36
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/base/build_deps.sh
generated
vendored
Normal file
@ -0,0 +1,36 @@
#!/bin/bash
#
# Contains the dependency builder to iterate over all installed dependencies
# and cross compile them to the requested target platform.
#
# Usage: build_deps.sh dependency_root_folder dependency1 dependency_2 ...
#
# Needed environment variables:
#   CC     - C cross compiler to use for the build
#   HOST   - Target platform to build (used to find the needed tool-chains)
#   PREFIX - File-system path where to install the built binaries
#   STATIC - true if the libraries are statically linked to the go application
set -e

DEP_ROOT_FOLDER=$1

# Remove any previous build leftovers, and copy a fresh working set (clean doesn't work for cross compiling)
rm -rf /deps-build && cp -r $DEP_ROOT_FOLDER /deps-build

args=("$@")

if [ "$STATIC" == "true" ]; then DISABLE_SHARED=-disable-shared; fi

# Build all the dependencies
for ((i=1; i<${#args[@]}; i++)); do
    dep=${args[i]}
    echo "Configuring dependency $dep for $HOST..."
    if [ -f "/deps-build/$dep/autogen.sh" ]; then (cd /deps-build/$dep && ./autogen.sh); fi
    (cd /deps-build/$dep && ./configure $DISABLE_SHARED --host=$HOST --prefix=$PREFIX --silent)

    echo "Building dependency $dep for $HOST..."
    (cd /deps-build/$dep && make --silent -j install)
done

# Remove any build artifacts
rm -rf /deps-build

17
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/base/fetch.sh
generated
vendored
Normal file
@ -0,0 +1,17 @@
#!/bin/bash
#
# Contains a simple fetcher to download a file from a remote URL and verify its
# SHA1 checksum.
#
# Usage: fetch.sh <remote URL> <SHA1 checksum>
set -e

# Pull the file from the remote URL
file=`basename $1`
echo "Downloading $1..."
wget --no-check-certificate -q $1

# Generate a desired checksum report and check against it
echo "$2  $file" > $file.sum
sha1sum -c $file.sum
rm $file.sum

2
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/base/sources.list
generated
vendored
Normal file
@ -0,0 +1,2 @@
deb http://archive.debian.org/debian/ squeeze main contrib
deb http://archive.debian.org/debian/ squeeze-lts main

18
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/beats-builder/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,18 @@
FROM tudorg/xgo-deb6-1.7.1

MAINTAINER Tudor Golubenco <tudor@elastic.co>

# Get libpcap binaries for linux
RUN \
    mkdir -p /libpcap && \
    wget http://archive.debian.org/debian/pool/main/libp/libpcap/libpcap0.8-dev_1.1.1-2+squeeze1_i386.deb && \
    dpkg -x libpcap0.8-dev_*_i386.deb /libpcap/i386 && \
    wget http://archive.debian.org/debian/pool/main/libp/libpcap/libpcap0.8-dev_1.1.1-2+squeeze1_amd64.deb && \
    dpkg -x libpcap0.8-dev_*_amd64.deb /libpcap/amd64 && \
    rm libpcap0.8-dev*.deb
RUN \
    apt-get -o Acquire::Check-Valid-Until=false update && \
    apt-get install -y libpcap0.8-dev

# add patch for gopacket
ADD gopacket_pcap.patch /gopacket_pcap.patch

24
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/beats-builder/gopacket_pcap.patch
generated
vendored
Normal file
@ -0,0 +1,24 @@
diff --git a/vendor/github.com/tsg/gopacket/pcap/pcap.go b/vendor/github.com/tsg/gopacket/pcap/pcap.go
index d2465bb..7b23b84 100644
--- a/vendor/github.com/tsg/gopacket/pcap/pcap.go
+++ b/vendor/github.com/tsg/gopacket/pcap/pcap.go
@@ -8,14 +8,15 @@
 package pcap
 
 /*
-#cgo linux LDFLAGS: -lpcap
+#cgo linux,386 LDFLAGS: /libpcap/i386/usr/lib/libpcap.a
+#cgo linux,amd64 LDFLAGS: /libpcap/amd64/usr/lib/libpcap.a
 #cgo freebsd LDFLAGS: -lpcap
 #cgo openbsd LDFLAGS: -lpcap
 #cgo darwin LDFLAGS: -lpcap
 #cgo solaris LDFLAGS: -lpcap
-#cgo windows CFLAGS: -I C:/WpdPack/Include
-#cgo windows,386 LDFLAGS: -L C:/WpdPack/Lib -lwpcap
-#cgo windows,amd64 LDFLAGS: -L C:/WpdPack/Lib/x64 -lwpcap
+#cgo windows CFLAGS: -I /libpcap/win/WpdPack/Include
+#cgo windows,386 LDFLAGS: -L /libpcap/win/WpdPack/Lib -lwpcap
+#cgo windows,amd64 LDFLAGS: -L /libpcap/win/WpdPack/Lib/x64 -lwpcap
 #include <stdlib.h>
 #include <pcap.h>
 

5
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/build.sh
generated
vendored
Executable file
@ -0,0 +1,5 @@
#!/bin/sh

docker build --rm=true -t tudorg/xgo-deb6-base base/ && \
docker build --rm=true -t tudorg/xgo-deb6-1.7.1 go-1.7.1/ &&
docker build --rm=true -t tudorg/beats-builder-deb6 beats-builder

15
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/go-1.7.1/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,15 @@
# Go cross compiler (xgo): Go 1.7.1 layer
# Copyright (c) 2014 Péter Szilágyi. All rights reserved.
#
# Released under the MIT license.

FROM tudorg/xgo-deb6-base

MAINTAINER Tudor Golubenco <tudor@elastic.co>

# Configure the root Go distribution and bootstrap based on it
RUN \
    export ROOT_DIST="https://storage.googleapis.com/golang/go1.7.1.linux-amd64.tar.gz" && \
    export ROOT_DIST_SHA1="919ab01305ada0078a9fdf8a12bb56fb0b8a1444" && \
    \
    $BOOTSTRAP_PURE

74
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,74 @@
# Go cross compiler (xgo): Base cross-compilation layer
# Copyright (c) 2014 Péter Szilágyi. All rights reserved.
#
# Released under the MIT license.

FROM ubuntu:14.04

MAINTAINER Tudor Golubenco <tudor@elastic.co>

# Configure the Go environment, since it's not going to change
ENV PATH /usr/local/go/bin:$PATH
ENV GOPATH /go


# Inject the remote file fetcher and checksum verifier
ADD fetch.sh /fetch.sh
ENV FETCH /fetch.sh
RUN chmod +x $FETCH


# Make sure apt-get is up to date and dependent packages are installed
RUN \
    apt-get update && \
    apt-get install -y automake autogen build-essential ca-certificates \
        gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libc6-dev-armel-cross \
        gcc-multilib g++-multilib mingw-w64 clang llvm-dev \
        libtool libxml2-dev uuid-dev libssl-dev swig pkg-config patch \
        make xz-utils cpio wget zip unzip p7zip git mercurial bzr texinfo help2man \
        binutils-multiarch rsync \
        --no-install-recommends

# Configure the container for OSX cross compilation
ENV OSX_SDK MacOSX10.11.sdk
ENV OSX_NDK_X86 /usr/local/osx-ndk-x86

RUN \
    OSX_SDK_PATH=https://s3.dockerproject.org/darwin/v2/$OSX_SDK.tar.xz && \
    $FETCH $OSX_SDK_PATH dd228a335194e3392f1904ce49aff1b1da26ca62 && \
    \
    git clone https://github.com/tpoechtrager/osxcross.git && \
    mv `basename $OSX_SDK_PATH` /osxcross/tarballs/ && \
    \
    sed -i -e 's|-march=native||g' /osxcross/build_clang.sh /osxcross/wrapper/build.sh && \
    UNATTENDED=yes OSX_VERSION_MIN=10.6 /osxcross/build.sh && \
    mv /osxcross/target $OSX_NDK_X86 && \
    \
    rm -rf /osxcross

ADD patch.tar.xz $OSX_NDK_X86/SDK/$OSX_SDK/usr/include/c++
ENV PATH $OSX_NDK_X86/bin:$PATH


# Inject the Go package downloader and tool-chain bootstrapper
ADD bootstrap.sh /bootstrap.sh
ENV BOOTSTRAP /bootstrap.sh
RUN chmod +x $BOOTSTRAP

# Inject the new Go root distribution downloader and secondary bootstrapper
ADD bootstrap_pure.sh /bootstrap_pure.sh
ENV BOOTSTRAP_PURE /bootstrap_pure.sh
RUN chmod +x $BOOTSTRAP_PURE

# Inject the C dependency cross compiler
ADD build_deps.sh /build_deps.sh
ENV BUILD_DEPS /build_deps.sh
RUN chmod +x $BUILD_DEPS

# Inject the container entry point, the build script
ADD build.sh /build.sh
ENV BUILD /build.sh
RUN chmod +x $BUILD

ENTRYPOINT ["/build.sh"]

48
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/bootstrap.sh
generated
vendored
Normal file
@ -0,0 +1,48 @@
#!/bin/bash
#
# Contains the Go tool-chain bootstrapper, that retrieves all the configured
# distribution packages, extracts the binaries and deletes anything not needed.
#
# Usage: bootstrap.sh
#
# Needed environment variables:
#   FETCH - Remote file fetcher and checksum verifier (injected by image)
#   DIST_LINUX_64, DIST_LINUX_64_SHA1   - 64 bit Linux Go binaries and checksum
#   DIST_LINUX_32, DIST_LINUX_32_SHA1   - 32 bit Linux Go binaries and checksum
#   DIST_LINUX_ARM, DIST_LINUX_ARM_SHA1 - ARM v5 Linux Go binaries and checksum
#   DIST_OSX_64, DIST_OSX_64_SHA1       - 64 bit Mac OSX Go binaries and checksum
#   DIST_OSX_32, DIST_OSX_32_SHA1       - 32 bit Mac OSX Go binaries and checksum
#   DIST_WIN_64, DIST_WIN_64_SHA1       - 64 bit Windows Go binaries and checksum
#   DIST_WIN_32, DIST_WIN_32_SHA1       - 32 bit Windows Go binaries and checksum
set -e

# Download and verify all the binary packages
$FETCH $DIST_LINUX_64 $DIST_LINUX_64_SHA1
$FETCH $DIST_LINUX_32 $DIST_LINUX_32_SHA1
$FETCH $DIST_OSX_64 $DIST_OSX_64_SHA1
$FETCH $DIST_WIN_64 $DIST_WIN_64_SHA1
$FETCH $DIST_WIN_32 $DIST_WIN_32_SHA1

# Extract the 64 bit Linux package as the primary Go SDK
tar -C /usr/local -xzf `basename $DIST_LINUX_64`

# Extract all other packages as secondary ones, keeping only the binaries
tar -C /usr/local --wildcards -xzf `basename $DIST_LINUX_32` go/pkg/linux_386*
GOOS=linux GOARCH=386 /usr/local/go/pkg/tool/linux_amd64/dist bootstrap
tar -C /usr/local --wildcards -xzf `basename $DIST_LINUX_ARM` go/pkg/linux_arm*
GOOS=linux GOARCH=arm /usr/local/go/pkg/tool/linux_amd64/dist bootstrap

tar -C /usr/local --wildcards -xzf `basename $DIST_OSX_64` go/pkg/darwin_amd64*
GOOS=darwin GOARCH=amd64 /usr/local/go/pkg/tool/linux_amd64/dist bootstrap
tar -C /usr/local --wildcards -xzf `basename $DIST_OSX_32` go/pkg/darwin_386*
GOOS=darwin GOARCH=386 /usr/local/go/pkg/tool/linux_amd64/dist bootstrap

unzip -d /usr/local -q `basename $DIST_WIN_64` go/pkg/windows_amd64*
GOOS=windows GOARCH=amd64 /usr/local/go/pkg/tool/linux_amd64/dist bootstrap
unzip -d /usr/local -q `basename $DIST_WIN_32` go/pkg/windows_386*
GOOS=windows GOARCH=386 /usr/local/go/pkg/tool/linux_amd64/dist bootstrap

# Delete all the intermediate downloaded files
rm -f `basename $DIST_LINUX_64` `basename $DIST_LINUX_32` `basename $DIST_LINUX_ARM` \
    `basename $DIST_OSX_64` `basename $DIST_OSX_32` \
    `basename $DIST_WIN_64` `basename $DIST_WIN_32`

41
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/bootstrap_pure.sh
generated
vendored
Normal file
@ -0,0 +1,41 @@
#!/bin/bash
#
# Contains the Go tool-chain pure-Go bootstrapper, that as of Go 1.5, initiates
# not only a few pre-built Go cross compilers, but rather bootstraps all of the
# supported platforms from the origin Linux amd64 distribution.
#
# Usage: bootstrap.sh
#
# Needed environment variables:
#   FETCH          - Remote file fetcher and checksum verifier (injected by image)
#   ROOT_DIST      - 64 bit Linux Go binary distribution package
#   ROOT_DIST_SHA1 - 64 bit Linux Go distribution package checksum
set -e

# Download, verify and install the root distribution
$FETCH $ROOT_DIST $ROOT_DIST_SHA1

tar -C /usr/local -xzf `basename $ROOT_DIST`
rm -f `basename $ROOT_DIST`

export GOROOT=/usr/local/go
export GOROOT_BOOTSTRAP=$GOROOT

# Pre-build all guest distributions based on the root distribution
echo "Bootstrapping linux/386..."
GOOS=linux GOARCH=386 CGO_ENABLED=1 go install std

echo "Bootstrapping linux/arm..."
GOOS=linux GOARCH=arm CGO_ENABLED=1 CC=arm-linux-gnueabi-gcc go install std

echo "Bootstrapping windows/amd64..."
GOOS=windows GOARCH=amd64 CGO_ENABLED=1 CC=x86_64-w64-mingw32-gcc go install std

echo "Bootstrapping windows/386..."
GOOS=windows GOARCH=386 CGO_ENABLED=1 CC=i686-w64-mingw32-gcc go install std

echo "Bootstrapping darwin/amd64..."
GOOS=darwin GOARCH=amd64 CGO_ENABLED=1 CC=o64-clang go install std

echo "Bootstrapping darwin/386..."
GOOS=darwin GOARCH=386 CGO_ENABLED=1 CC=o32-clang go install std

215
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/build.sh
generated
vendored
Normal file
@ -0,0 +1,215 @@
#!/bin/bash
#
# Contains the main cross compiler, that individually sets up each target build
# platform, compiles all the C dependencies, then builds the requested
# executable itself.
#
# Usage: build.sh <import path>
#
# Needed environment variables:
#   REPO_REMOTE - Optional VCS remote, if not using the primary repository
#   REPO_BRANCH - Optional VCS branch to use, if not the master branch
#   DEPS        - Optional list of C dependency packages to build
#   PACK        - Optional sub-package to build, if not the import path itself
#   OUT         - Optional output prefix to override the package name
#   FLAG_V      - Optional verbosity flag to set on the Go builder
#   FLAG_RACE   - Optional race flag to set on the Go builder
#   TARGETS     - Comma separated list of build targets to compile for


# Download the canonical import path (may fail, don't allow failures beyond)
SRC_FOLDER=$SOURCE

BEAT_PATH=$1
DST_FOLDER=`dirname $GOPATH/src/$BEAT_PATH`
GIT_REPO=$BEAT_PATH

if [ "$PUREGO" == "yes" ]; then
    CGO_ENABLED=0
else
    CGO_ENABLED=1
fi

# If it is an official beat, libbeat is not vendored and needs special treatment
if [[ $GIT_REPO == "github.com/elastic/beats"* ]]; then
    echo "Overwrite directories because official beat"
    DST_FOLDER=$GOPATH/src/github.com/elastic/beats
    GIT_REPO=github.com/elastic/beats
fi

# It is assumed all dependencies are inside the working directory.
# The working directory is the parent of the beat directory.
WORKING_DIRECTORY=$DST_FOLDER

echo "Working directory=$WORKING_DIRECTORY"

if [ "$SOURCE" != "" ]; then
    mkdir -p ${DST_FOLDER}
    echo "Copying main source folder ${SRC_FOLDER} to folder ${DST_FOLDER}"
    rsync --exclude ".git" --exclude "build/" -a ${SRC_FOLDER}/ ${DST_FOLDER}
else
    mkdir -p $GOPATH/src/${GIT_REPO}
    cd $GOPATH/src/${GIT_REPO}
    echo "Fetching main git repository ${GIT_REPO} in folder $GOPATH/src/${GIT_REPO}"
    git clone https://${GIT_REPO}.git
fi

set -e

cd $WORKING_DIRECTORY

# Switch over the code-base to another checkout if requested
if [ "$REPO_REMOTE" != "" ]; then
    echo "Switching over to remote $REPO_REMOTE..."
    if [ -d ".git" ]; then
        git remote set-url origin $REPO_REMOTE
        git pull
    elif [ -d ".hg" ]; then
        echo -e "[paths]\ndefault = $REPO_REMOTE\n" >> .hg/hgrc
        hg pull
    fi
fi

if [ "$REPO_BRANCH" != "" ]; then
    echo "Switching over to branch $REPO_BRANCH..."
    if [ -d ".git" ]; then
        git checkout $REPO_BRANCH
    elif [ -d ".hg" ]; then
        hg checkout $REPO_BRANCH
    fi
fi

# Download all the C dependencies
echo "Fetching dependencies..."
BUILD_DEPS=/build_deps.sh
DEPS_FOLDER=/deps
LIST_DEPS=""
mkdir -p $DEPS_FOLDER
DEPS=($DEPS) && for dep in "${DEPS[@]}"; do
    dep_filename=${dep##*/}
    echo "Downloading $dep to $DEPS_FOLDER/$dep_filename"
    wget -q $dep --directory-prefix=$DEPS_FOLDER
    dep_name=$(tar --list --no-recursion --file=$DEPS_FOLDER/$dep_filename --exclude="*/*" | sed 's/\///g')
    LIST_DEPS="${LIST_DEPS} ${dep_name}"
    if [ "${dep_filename##*.}" == "tar" ]; then tar -xf $DEPS_FOLDER/$dep_filename --directory $DEPS_FOLDER/ ; fi
    if [ "${dep_filename##*.}" == "gz" ]; then tar -xzf $DEPS_FOLDER/$dep_filename --directory $DEPS_FOLDER/ ; fi
    if [ "${dep_filename##*.}" == "bz2" ]; then tar -xjf $DEPS_FOLDER/$dep_filename --directory $DEPS_FOLDER/ ; fi
done

# Configure some global build parameters
NAME=${PACK}
if [ "$OUT" != "" ]; then
    NAME=$OUT
fi


if [ "$FLAG_V" == "true" ]; then V=-v; fi
if [ "$FLAG_RACE" == "true" ]; then R=-race; fi
if [ "$STATIC" == "true" ]; then LDARGS=--ldflags\ \'-extldflags\ \"-static\"\'; fi

if [ -n "$BEFORE_BUILD" ]; then
    chmod +x /scripts/$BEFORE_BUILD
    echo "Execute /scripts/$BEFORE_BUILD ${BEAT_PATH}"
    /scripts/$BEFORE_BUILD ${BEAT_PATH}
fi


# If no build targets were specified, inject a catch all wildcard
if [ "$TARGETS" == "" ]; then
    TARGETS="./."
fi


built_targets=0
for TARGET in $TARGETS; do
    # Split the target into platform and architecture
    XGOOS=`echo $TARGET | cut -d '/' -f 1`
    XGOARCH=`echo $TARGET | cut -d '/' -f 2`

    # Check and build for Linux targets
    if ([ $XGOOS == "." ] || [ $XGOOS == "linux" ]) && ([ $XGOARCH == "." ] || [ $XGOARCH == "amd64" ]); then
        echo "Compiling $PACK for linux/amd64..."
        HOST=x86_64-linux PREFIX=/usr/local $BUILD_DEPS /deps $LIST_DEPS
        export PKG_CONFIG_PATH=/usr/aarch64-linux-gnu/lib/pkgconfig

        GOOS=linux GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go get -d ./$PACK
        sh -c "GOOS=linux GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build $V $R $LDARGS -o $NAME-linux-amd64$R ./$PACK"
        built_targets=$((built_targets+1))
    fi
    if ([ $XGOOS == "." ] || [ $XGOOS == "linux" ]) && ([ $XGOARCH == "." ] || [ $XGOARCH == "386" ]); then
        echo "Compiling $PACK for linux/386..."
        CFLAGS=-m32 CXXFLAGS=-m32 LDFLAGS=-m32 HOST=i686-linux PREFIX=/usr/local $BUILD_DEPS /deps $LIST_DEPS
        GOOS=linux GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go get -d ./$PACK
        sh -c "GOOS=linux GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go build $V $R $LDARGS -o $NAME-linux-386$R ./$PACK"
        built_targets=$((built_targets+1))
    fi
    if ([ $XGOOS == "." ] || [ $XGOOS == "linux" ]) && ([ $XGOARCH == "." ] || [ $XGOARCH == "arm" ]); then
        echo "Compiling $PACK for linux/arm..."
        CC=arm-linux-gnueabi-gcc CXX=arm-linux-gnueabi-g++ HOST=arm-linux PREFIX=/usr/local/arm $BUILD_DEPS /deps $LIST_DEPS

        CC=arm-linux-gnueabi-gcc CXX=arm-linux-gnueabi-g++ GOOS=linux GOARCH=arm CGO_ENABLED=${CGO_ENABLED} GOARM=5 go get -d ./$PACK
        CC=arm-linux-gnueabi-gcc CXX=arm-linux-gnueabi-g++ GOOS=linux GOARCH=arm CGO_ENABLED=${CGO_ENABLED} GOARM=5 go build $V -o $NAME-linux-arm ./$PACK
        built_targets=$((built_targets+1))
    fi

    # Check and build for Windows targets
    if [ $XGOOS == "." ] || [[ $XGOOS == windows* ]]; then
        # Split the platform version and configure the Windows NT version
        PLATFORM=`echo $XGOOS | cut -d '-' -f 2`
        if [ "$PLATFORM" == "" ] || [ "$PLATFORM" == "." ] || [ "$PLATFORM" == "windows" ]; then
            PLATFORM=4.0 # Windows NT
        fi

        MAJOR=`echo $PLATFORM | cut -d '.' -f 1`
        if [ "${PLATFORM/.}" != "$PLATFORM" ] ; then
            MINOR=`echo $PLATFORM | cut -d '.' -f 2`
        fi
        CGO_NTDEF="-D_WIN32_WINNT=0x`printf "%02d" $MAJOR``printf "%02d" $MINOR`"

        # Build the requested windows binaries
        if [ $XGOARCH == "." ] || [ $XGOARCH == "amd64" ]; then
            echo "Compiling $PACK for windows-$PLATFORM/amd64..."
            CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ CFLAGS="$CGO_NTDEF" CXXFLAGS="$CGO_NTDEF" HOST=x86_64-w64-mingw32 PREFIX=/usr/x86_64-w64-mingw32 $BUILD_DEPS /deps $LIST_DEPS
            export PKG_CONFIG_PATH=/usr/x86_64-w64-mingw32/lib/pkgconfig

            CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ GOOS=windows GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go get -d ./$PACK
            CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ GOOS=windows GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go build $V $R -o $NAME-windows-amd64$R.exe ./$PACK
            built_targets=$((built_targets+1))
        fi

        if [ $XGOARCH == "." ] || [ $XGOARCH == "386" ]; then
            echo "Compiling $PACK for windows-$PLATFORM/386..."
            CC=i686-w64-mingw32-gcc CXX=i686-w64-mingw32-g++ CFLAGS="$CGO_NTDEF" CXXFLAGS="$CGO_NTDEF" HOST=i686-w64-mingw32 PREFIX=/usr/i686-w64-mingw32 $BUILD_DEPS /deps $LIST_DEPS
            export PKG_CONFIG_PATH=/usr/i686-w64-mingw32/lib/pkgconfig

            CC=i686-w64-mingw32-gcc CXX=i686-w64-mingw32-g++ GOOS=windows GOARCH=386 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go get -d ./$PACK
            CC=i686-w64-mingw32-gcc CXX=i686-w64-mingw32-g++ GOOS=windows GOARCH=386 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go build $V -o $NAME-windows-386.exe ./$PACK
            built_targets=$((built_targets+1))
        fi
    fi

    # Check and build for OSX targets
    if ([ $XGOOS == "." ] || [ $XGOOS == "darwin" ]) && ([ $XGOARCH == "." ] || [ $XGOARCH == "amd64" ]); then
        echo "Compiling $PACK for darwin/amd64..."
        CC=o64-clang CXX=o64-clang++ HOST=x86_64-apple-darwin10 PREFIX=/usr/local $BUILD_DEPS /deps $LIST_DEPS
        CC=o64-clang CXX=o64-clang++ GOOS=darwin GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go get -d ./$PACK
        CC=o64-clang CXX=o64-clang++ GOOS=darwin GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build -ldflags=-s $V $R -o $NAME-darwin-amd64$R ./$PACK
        built_targets=$((built_targets+1))
    fi
    if ([ $XGOOS == "." ] || [ $XGOOS == "darwin" ]) && ([ $XGOARCH == "." ] || [ $XGOARCH == "386" ]); then
        echo "Compiling for darwin/386..."
        CC=o32-clang CXX=o32-clang++ HOST=i386-apple-darwin10 PREFIX=/usr/local $BUILD_DEPS /deps $LIST_DEPS
        CC=o32-clang CXX=o32-clang++ GOOS=darwin GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go get -d ./$PACK
        CC=o32-clang CXX=o32-clang++ GOOS=darwin GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go build $V -o $NAME-darwin-386 ./$PACK
        built_targets=$((built_targets+1))
    fi
done


# The binary files are the last created files
echo "Moving $built_targets $PACK binaries to host folder..."
ls -t | head -n $built_targets
cp `ls -t | head -n $built_targets ` /build

echo "Build process completed"

36
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/build_deps.sh
generated
vendored
Normal file
@ -0,0 +1,36 @@
#!/bin/bash
#
# Contains the dependency builder to iterate over all installed dependencies
# and cross compile them to the requested target platform.
#
# Usage: build_deps.sh dependency_root_folder dependency1 dependency_2 ...
#
# Needed environment variables:
#   CC     - C cross compiler to use for the build
#   HOST   - Target platform to build (used to find the needed tool-chains)
#   PREFIX - File-system path where to install the built binaries
#   STATIC - true if the libraries are statically linked to the go application
set -e

DEP_ROOT_FOLDER=$1

# Remove any previous build leftovers, and copy a fresh working set (clean doesn't work for cross compiling)
rm -rf /deps-build && cp -r $DEP_ROOT_FOLDER /deps-build

args=("$@")

if [ "$STATIC" == "true" ]; then DISABLE_SHARED=-disable-shared; fi

# Build all the dependencies
for ((i=1; i<${#args[@]}; i++)); do
    dep=${args[i]}
    echo "Configuring dependency $dep for $HOST..."
    if [ -f "/deps-build/$dep/autogen.sh" ]; then (cd /deps-build/$dep && ./autogen.sh); fi
    (cd /deps-build/$dep && ./configure $DISABLE_SHARED --host=$HOST --prefix=$PREFIX --silent)

    echo "Building dependency $dep for $HOST..."
    (cd /deps-build/$dep && make --silent -j install)
done

# Remove any build artifacts
rm -rf /deps-build

17
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/fetch.sh
generated
vendored
Normal file
@ -0,0 +1,17 @@
#!/bin/bash
#
# Contains a simple fetcher to download a file from a remote URL and verify its
# SHA1 checksum.
#
# Usage: fetch.sh <remote URL> <SHA1 checksum>
set -e

# Pull the file from the remote URL
file=`basename $1`
echo "Downloading $1..."
wget -q $1

# Generate a desired checksum report and check against it
echo "$2  $file" > $file.sum
sha1sum -c $file.sum
rm $file.sum

BIN
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/patch.tar.xz
generated
vendored
Normal file
Binary file not shown.

41
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/beats-builder/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,41 @@
FROM tudorg/xgo-1.7.1

MAINTAINER Tudor Golubenco <tudor@elastic.co>

# Get libpcap binaries for linux
RUN \
    dpkg --add-architecture i386 && \
    apt-get update && \
    apt-get install -y libpcap0.8-dev

RUN \
    mkdir -p /libpcap && \
    apt-get download libpcap0.8-dev:i386 && \
    dpkg -x libpcap0.8-dev_*_i386.deb /libpcap/i386 && \
    apt-get download libpcap0.8-dev && \
    dpkg -x libpcap0.8-dev_*_amd64.deb /libpcap/amd64 && \
    rm libpcap0.8-dev*.deb


# Get libpcap binaries for win
ENV WPDPACK_URL https://www.winpcap.org/install/bin/WpdPack_4_1_2.zip
RUN \
    ./fetch.sh $WPDPACK_URL f5c80885bd48f07f41833d0f65bf85da1ef1727a && \
    unzip `basename $WPDPACK_URL` -d /libpcap/win && \
    rm `basename $WPDPACK_URL`

# Add patch for gopacket.
ADD gopacket_pcap.patch /gopacket_pcap.patch

# Add the wpcap.dll from the WinPcap_4_1_2.exe installer so that
# we can generate a 64-bit compatible libwpcap.a.
ENV WINPCAP_DLL_SHA1 d2afb08d0379bd96e423857963791e2ba00c9645
ADD wpcap.dll /libpcap/win/wpcap.dll
RUN \
    apt-get install mingw-w64-tools && \
    cd /libpcap/win && \
    echo "$WINPCAP_DLL_SHA1  wpcap.dll" | sha1sum -c - && \
    gendef /libpcap/win/wpcap.dll && \
    x86_64-w64-mingw32-dlltool --as-flags=--64 -m i386:x86-64 -k --output-lib /libpcap/win/WpdPack/Lib/x64/libwpcap.a --input-def wpcap.def && \
    rm wpcap.def wpcap.dll

24
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/beats-builder/gopacket_pcap.patch
generated
vendored
Normal file
@ -0,0 +1,24 @@
diff --git a/vendor/github.com/tsg/gopacket/pcap/pcap.go b/vendor/github.com/tsg/gopacket/pcap/pcap.go
index f5612e6..4438e6f 100644
--- a/vendor/github.com/tsg/gopacket/pcap/pcap.go
+++ b/vendor/github.com/tsg/gopacket/pcap/pcap.go
@@ -8,14 +8,15 @@
 package pcap
 
 /*
-#cgo linux LDFLAGS: -lpcap
+#cgo linux,386 LDFLAGS: /libpcap/i386/usr/lib/i386-linux-gnu/libpcap.a
+#cgo linux,amd64 LDFLAGS: /libpcap/amd64/usr/lib/x86_64-linux-gnu/libpcap.a
 #cgo freebsd LDFLAGS: -lpcap
 #cgo openbsd LDFLAGS: -lpcap
 #cgo darwin LDFLAGS: -lpcap
 #cgo solaris LDFLAGS: -lpcap
-#cgo windows CFLAGS: -I C:/WpdPack/Include
-#cgo windows,386 LDFLAGS: -L C:/WpdPack/Lib -lwpcap
-#cgo windows,amd64 LDFLAGS: -L C:/WpdPack/Lib/x64 -lwpcap
+#cgo windows CFLAGS: -I /libpcap/win/WpdPack/Include
+#cgo windows,386 LDFLAGS: -L /libpcap/win/WpdPack/Lib -lwpcap
+#cgo windows,amd64 LDFLAGS: -L /libpcap/win/WpdPack/Lib/x64 -lwpcap
 #include <stdlib.h>
 #include <pcap.h>
 

BIN
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/beats-builder/wpcap.dll
generated
vendored
Normal file
Binary file not shown.

5
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/build.sh
generated
vendored
Executable file
@ -0,0 +1,5 @@
#!/bin/sh

docker build --rm=true -t tudorg/xgo-base base/ && \
docker build --rm=true -t tudorg/xgo-1.7.1 go-1.7.1/ &&
docker build --rm=true -t tudorg/beats-builder beats-builder

15
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/go-1.7.1/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,15 @@
# Go cross compiler (xgo): Go 1.7.1 layer
# Copyright (c) 2014 Péter Szilágyi. All rights reserved.
#
# Released under the MIT license.

FROM tudorg/xgo-base

MAINTAINER Tudor Golubenco <tudor@elastic.co>

# Configure the root Go distribution and bootstrap based on it
RUN \
    export ROOT_DIST="https://storage.googleapis.com/golang/go1.7.1.linux-amd64.tar.gz" && \
    export ROOT_DIST_SHA1="919ab01305ada0078a9fdf8a12bb56fb0b8a1444" && \
    \
    $BOOTSTRAP_PURE

1
vendor/github.com/elastic/beats/dev-tools/packer/platforms/README
generated
vendored
Normal file
@ -0,0 +1 @@
Pseudo-platform to build the dashboards in their own package.

19
vendor/github.com/elastic/beats/dev-tools/packer/platforms/binary/build.sh
generated
vendored
Executable file
@ -0,0 +1,19 @@
#!/bin/sh

set -e

BASEDIR=$(dirname "$0")
ARCHDIR=${BASEDIR}/../../

# executed from the top directory
runid=binary-$BEAT-$ARCH

cat ${BUILD_DIR}/package.yml ${ARCHDIR}/archs/$ARCH.yml > ${BUILD_DIR}/settings-$runid.yml
gotpl ${BASEDIR}/run.sh.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/run-$runid.sh
chmod +x ${BUILD_DIR}/run-$runid.sh

docker run --rm -v ${BUILD_DIR}:/build \
    -e BUILDID=$BUILDID -e SNAPSHOT=$SNAPSHOT -e RUNID=$runid \
    tudorg/fpm /build/run-$runid.sh

rm ${BUILD_DIR}/settings-$runid.yml ${BUILD_DIR}/run-$runid.sh

28
vendor/github.com/elastic/beats/dev-tools/packer/platforms/binary/run.sh.j2
generated
vendored
Normal file
@ -0,0 +1,28 @@
#!/bin/bash

# this is executed in the docker fpm image
set -e
cd /build

# add SNAPSHOT if it was requested
VERSION={{.version}}
if [ "$SNAPSHOT" = "yes" ]; then
    VERSION="${VERSION}-SNAPSHOT"
fi

mkdir /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}
cp -a homedir/. /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/
install -D -m 755 import_dashboards-linux-{{.arch}} /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/scripts/import_dashboards
cp {{.beat_name}}-linux-{{.arch}} /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/{{.beat_name}}
cp {{.beat_name}}-linux.yml /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/{{.beat_name}}.yml
cp {{.beat_name}}-linux.full.yml /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/{{.beat_name}}.full.yml
cp {{.beat_name}}.template.json /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/
cp {{.beat_name}}.template-es2x.json /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/

mkdir -p upload
tar czvf upload/{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}.tar.gz /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}
echo "Created upload/{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}.tar.gz"

cd upload
sha1sum {{.beat_name}}-${VERSION}-linux-{{.bin_arch}}.tar.gz | awk '{print $1;}' > {{.beat_name}}-${VERSION}-linux-{{.bin_arch}}.tar.gz.sha1
echo "Created upload/{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}.tar.gz.sha1"

11
vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/beatname.sh.j2
generated
vendored
Normal file
@@ -0,0 +1,11 @@
#!/bin/bash

# Script to run {{.beat_name}} in foreground with the same path settings that
# the init script / systemd unit file would do.

/usr/share/{{.beat_name}}/bin/{{.beat_name}} \
    -path.home /usr/share/{{.beat_name}} \
    -path.config /etc/{{.beat_name}} \
    -path.data /var/lib/{{.beat_name}} \
    -path.logs /var/log/{{.beat_name}} \
    $@
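Per the centos run.sh.j2 further below, the rendered wrapper is installed as /usr/bin/{{.beat_name}}.sh. Assuming the template was rendered for icingabeat, an ad-hoc foreground run looks like:

```sh
# Extra flags pass straight through to the beat via $@.
/usr/bin/icingabeat.sh -e -d "publish"
```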
23
vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/build.sh
generated
vendored
Executable file
@@ -0,0 +1,23 @@
#!/bin/sh

set -e

BASEDIR=$(dirname "$0")
ARCHDIR=${BASEDIR}/../../

# executed from the top directory
runid=centos-$BEAT-$ARCH

cat ${BUILD_DIR}/package.yml ${ARCHDIR}/archs/$ARCH.yml > ${BUILD_DIR}/settings-$runid.yml
gotpl ${BASEDIR}/run.sh.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/run-$runid.sh
chmod +x ${BUILD_DIR}/run-$runid.sh
gotpl ${BASEDIR}/init.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/$runid.init
gotpl ${BASEDIR}/systemd.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/$runid.service
gotpl ${BASEDIR}/beatname.sh.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/beatname-$runid.sh
chmod +x ${BUILD_DIR}/beatname-$runid.sh

docker run --rm -v ${BUILD_DIR}:/build \
    -e BUILDID=$BUILDID -e SNAPSHOT=$SNAPSHOT -e RUNID=$runid \
    tudorg/fpm /build/run-$runid.sh

rm ${BUILD_DIR}/settings-$runid.yml ${BUILD_DIR}/run-$runid.sh
110
vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/init.j2
generated
vendored
Executable file
@@ -0,0 +1,110 @@
#!/bin/bash
#
# {{.beat_name}}          {{.beat_name}} shipper
#
# chkconfig: 2345 98 02
# description: Starts and stops a single {{.beat_name}} instance on this system
#

### BEGIN INIT INFO
# Provides:          {{.beat_name}}
# Required-Start:    $local_fs $network $syslog
# Required-Stop:     $local_fs $network $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: {{.beat_description}}
# Description:       {{.beat_name}} is a shipper part of the Elastic Beats
#                    family. Please see: https://www.elastic.co/products/beats
### END INIT INFO



PATH=/usr/bin:/sbin:/bin:/usr/sbin
export PATH

[ -f /etc/sysconfig/{{.beat_name}} ] && . /etc/sysconfig/{{.beat_name}}
pidfile=${PIDFILE-/var/run/{{.beat_name}}.pid}
agent=${BEATS_AGENT-/usr/share/{{.beat_name}}/bin/{{.beat_name}}}
args="-c /etc/{{.beat_name}}/{{.beat_name}}.yml -path.home /usr/share/{{.beat_name}} -path.config /etc/{{.beat_name}} -path.data /var/lib/{{.beat_name}} -path.logs /var/log/{{.beat_name}}"
test_args="-e -configtest"
wrapper="/usr/share/{{.beat_name}}/bin/{{.beat_name}}-god"
wrapperopts="-r / -n -p $pidfile"
RETVAL=0

# Source function library.
. /etc/rc.d/init.d/functions

# Determine if we can use the -p option to daemon, killproc, and status.
# RHEL < 5 can't.
if status | grep -q -- '-p' 2>/dev/null; then
    daemonopts="--pidfile $pidfile"
    pidopts="-p $pidfile"
fi

test() {
    $agent $args $test_args
}

start() {
    echo -n $"Starting {{.beat_name}}: "
    test
    if [ $? -ne 0 ]; then
        echo
        exit 1
    fi
    daemon $daemonopts $wrapper $wrapperopts -- $agent $args
    RETVAL=$?
    echo
    return $RETVAL
}

stop() {
    echo -n $"Stopping {{.beat_name}}: "
    killproc $pidopts $wrapper
    RETVAL=$?
    echo
    [ $RETVAL = 0 ] && rm -f ${pidfile}
}

restart() {
    test
    if [ $? -ne 0 ]; then
        return 1
    fi
    stop
    start
}

rh_status() {
    status $pidopts $wrapper
    RETVAL=$?
    return $RETVAL
}

rh_status_q() {
    rh_status >/dev/null 2>&1
}

case "$1" in
    start)
        start
    ;;
    stop)
        stop
    ;;
    restart)
        restart
    ;;
    condrestart|try-restart)
        rh_status_q || exit 0
        restart
    ;;
    status)
        rh_status
    ;;
    *)
        echo $"Usage: $0 {start|stop|status|restart|condrestart}"
        exit 1
esac

exit $RETVAL
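The rendered init script is wired into the RPM through fpm's --rpm-init flag (see run.sh.j2 below). Typical SysV usage on an installed host, assuming the beat name is icingabeat:

```sh
sudo service icingabeat start
sudo service icingabeat status
sudo service icingabeat condrestart   # restarts only if already running (rh_status_q)
sudo chkconfig icingabeat on          # enable at boot; runlevels 2345 per the header
```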
55
vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/run.sh.j2
generated
vendored
Normal file
@@ -0,0 +1,55 @@
#!/bin/bash

# this is executed in the docker fpm image
set -e
cd /build

# the init script needs to have the right name
cp ${RUNID}.init /tmp/{{.beat_name}}.init

# create script to reload systemd config
echo "#!/bin/bash" > /tmp/systemd-daemon-reload.sh
echo "systemctl daemon-reload 2> /dev/null || true" >> /tmp/systemd-daemon-reload.sh

# add SNAPSHOT if it was requested
VERSION="{{.version}}"
if [ "$SNAPSHOT" = "yes" ]; then
    VERSION="${VERSION}-SNAPSHOT"
fi

# fpm replaces - with _ in the version
RPM_VERSION=`echo ${VERSION} | sed 's/-/_/g'`

# create rpm
fpm --force -s dir -t rpm \
    -n {{.beat_name}} -v ${RPM_VERSION} \
    --architecture {{.rpm_arch}} \
    --vendor "Elastic" \
    --license "ASL 2.0" \
    --description "{{.beat_description}}" \
    --url {{.beat_url}} \
    --rpm-init /tmp/{{.beat_name}}.init \
    --after-install /tmp/systemd-daemon-reload.sh \
    --config-files /etc/{{.beat_name}}/{{.beat_name}}.yml \
    homedir/=/usr/share/{{.beat_name}} \
    beatname-${RUNID}.sh=/usr/bin/{{.beat_name}}.sh \
    {{.beat_name}}-linux-{{.arch}}=/usr/share/{{.beat_name}}/bin/{{.beat_name}} \
    {{.beat_name}}-linux.yml=/etc/{{.beat_name}}/{{.beat_name}}.yml \
    {{.beat_name}}-linux.full.yml=/etc/{{.beat_name}}/{{.beat_name}}.full.yml \
    {{.beat_name}}.template.json=/etc/{{.beat_name}}/{{.beat_name}}.template.json \
    {{.beat_name}}.template-es2x.json=/etc/{{.beat_name}}/{{.beat_name}}.template-es2x.json \
    ${RUNID}.service=/lib/systemd/system/{{.beat_name}}.service \
    god-linux-{{.arch}}=/usr/share/{{.beat_name}}/bin/{{.beat_name}}-god \
    import_dashboards-linux-{{.arch}}=/usr/share/{{.beat_name}}/scripts/import_dashboards



# rename so that the filename respects semver rules
mkdir -p upload
mv {{.beat_name}}-${RPM_VERSION}-1.{{.rpm_arch}}.rpm upload/{{.beat_name}}-${VERSION}-{{.rpm_arch}}.rpm
echo "Created upload/{{.beat_name}}-${VERSION}-{{.rpm_arch}}.rpm"

# create sha1 file
cd upload
sha1sum {{.beat_name}}-${VERSION}-{{.rpm_arch}}.rpm | awk '{print $1;}' > {{.beat_name}}-${VERSION}-{{.rpm_arch}}.rpm.sha1
echo "Created upload/{{.beat_name}}-${VERSION}-{{.rpm_arch}}.rpm.sha1"
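A quick sanity check of the artifacts this produces; the file names below assume beat_name=icingabeat, version 1.0.0, and rpm_arch x86_64:

```sh
cd upload
rpm -qpi icingabeat-1.0.0-x86_64.rpm        # vendor, license, description
rpm -qpl icingabeat-1.0.0-x86_64.rpm        # payload: /usr/share, /etc, systemd unit
# The .sha1 file holds only the digest, so rebuild a checkable line for sha1sum -c:
printf '%s  %s\n' "$(cat icingabeat-1.0.0-x86_64.rpm.sha1)" \
    icingabeat-1.0.0-x86_64.rpm | sha1sum -c -
```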
12
vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/systemd.j2
generated
vendored
Normal file
@@ -0,0 +1,12 @@
[Unit]
Description={{.beat_name}}
Documentation=https://www.elastic.co/guide/en/beats/{{.beat_name}}/current/index.html
Wants=network-online.target
After=network-online.target

[Service]
ExecStart=/usr/share/{{.beat_name}}/bin/{{.beat_name}} -c /etc/{{.beat_name}}/{{.beat_name}}.yml -path.home /usr/share/{{.beat_name}} -path.config /etc/{{.beat_name}} -path.data /var/lib/{{.beat_name}} -path.logs /var/log/{{.beat_name}}
Restart=always

[Install]
WantedBy=multi-user.target
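On systemd hosts the same RPM ships this unit to /lib/systemd/system. A usual bring-up, again assuming the unit was rendered for icingabeat:

```sh
sudo systemctl daemon-reload          # the RPM's after-install script does this too
sudo systemctl enable icingabeat      # hooks into multi-user.target
sudo systemctl start icingabeat
systemctl status icingabeat
```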
19
vendor/github.com/elastic/beats/dev-tools/packer/platforms/darwin/build.sh
generated
vendored
Executable file
@@ -0,0 +1,19 @@
#!/bin/sh

set -e

BASEDIR=$(dirname "$0")
ARCHDIR=${BASEDIR}/../../

# executed from the top directory
runid=darwin-$BEAT-$ARCH

cat ${BUILD_DIR}/package.yml ${ARCHDIR}/archs/$ARCH.yml > ${BUILD_DIR}/settings-$runid.yml
gotpl ${BASEDIR}/run.sh.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/run-$runid.sh
chmod +x ${BUILD_DIR}/run-$runid.sh

docker run --rm -v ${BUILD_DIR}:/build \
    -e BUILDID=$BUILDID -e SNAPSHOT=$SNAPSHOT -e RUNID=$runid \
    tudorg/fpm /build/run-$runid.sh

rm ${BUILD_DIR}/settings-$runid.yml ${BUILD_DIR}/run-$runid.sh
28
vendor/github.com/elastic/beats/dev-tools/packer/platforms/darwin/run.sh.j2
generated
vendored
Normal file
@@ -0,0 +1,28 @@
#!/bin/bash

# this is executed in the docker fpm image
set -e
cd /build

# add SNAPSHOT if it was requested
VERSION={{.version}}
if [ "$SNAPSHOT" = "yes" ]; then
    VERSION="${VERSION}-SNAPSHOT"
fi

mkdir /{{.beat_name}}-${VERSION}-darwin-x86_64
cp -a homedir/. /{{.beat_name}}-${VERSION}-darwin-x86_64/
install -D -m 755 import_dashboards-darwin-{{.arch}} /{{.beat_name}}-${VERSION}-darwin-x86_64/scripts/import_dashboards
cp {{.beat_name}}-darwin-amd64 /{{.beat_name}}-${VERSION}-darwin-x86_64/{{.beat_name}}
cp {{.beat_name}}-darwin.yml /{{.beat_name}}-${VERSION}-darwin-x86_64/{{.beat_name}}.yml
cp {{.beat_name}}-darwin.full.yml /{{.beat_name}}-${VERSION}-darwin-x86_64/{{.beat_name}}.full.yml
cp {{.beat_name}}.template.json /{{.beat_name}}-${VERSION}-darwin-x86_64/
cp {{.beat_name}}.template-es2x.json /{{.beat_name}}-${VERSION}-darwin-x86_64/

mkdir -p upload
tar czvf upload/{{.beat_name}}-${VERSION}-darwin-x86_64.tar.gz /{{.beat_name}}-${VERSION}-darwin-x86_64
echo "Created upload/{{.beat_name}}-${VERSION}-darwin-x86_64.tar.gz"

cd upload
sha1sum {{.beat_name}}-${VERSION}-darwin-x86_64.tar.gz | awk '{print $1;}' > {{.beat_name}}-${VERSION}-darwin-x86_64.tar.gz.sha1
echo "Created upload/{{.beat_name}}-${VERSION}-darwin-x86_64.tar.gz.sha1"
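Worth noting: install -D (create missing parent directories) is GNU coreutils behavior rather than BSD/macOS install; the darwin packaging can rely on it because this script runs inside the Linux-based tudorg/fpm container, not on a macOS host. A portable equivalent, with placeholder paths for illustration:

```sh
# Equivalent without GNU's -D flag (src/dst are placeholders):
src=import_dashboards-darwin-amd64
dst=/pkg/scripts/import_dashboards
mkdir -p "$(dirname "$dst")" && install -m 755 "$src" "$dst"
```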
18
vendor/github.com/elastic/beats/dev-tools/packer/platforms/dashboards/build.sh
generated
vendored
Executable file
@@ -0,0 +1,18 @@
#!/bin/sh

set -e

BASEDIR=$(dirname "$0")
ARCHDIR=${BASEDIR}/../../

runid=dashboards

cat ${ARCHDIR}/version.yml > ${BUILD_DIR}/settings-$runid.yml
gotpl ${BASEDIR}/run.sh.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/run-$runid.sh
chmod +x ${BUILD_DIR}/run-$runid.sh

docker run --rm -v ${BUILD_DIR}:/build \
    -e BUILDID=$BUILDID -e SNAPSHOT=$SNAPSHOT -e RUNID=$runid -e BEATNAME=$BEATNAME \
    tudorg/fpm /build/run-$runid.sh

rm ${BUILD_DIR}/settings-$runid.yml ${BUILD_DIR}/run-$runid.sh
23
vendor/github.com/elastic/beats/dev-tools/packer/platforms/dashboards/run.sh.j2
generated
vendored
Normal file
@@ -0,0 +1,23 @@
#!/bin/bash

# this is executed in the docker fpm image
set -e
cd /build

# add SNAPSHOT if it was requested
VERSION={{.version}}
if [ "$SNAPSHOT" = "yes" ]; then
    VERSION="${VERSION}-SNAPSHOT"
fi

mkdir /${BEATNAME:-beats}-dashboards-${VERSION}
cp -a dashboards/. /${BEATNAME:-beats}-dashboards-${VERSION}/
echo "$BUILDID" > /${BEATNAME:-beats}-dashboards-${VERSION}/.build_hash.txt

mkdir -p upload
zip -r upload/${BEATNAME:-beats}-dashboards-${VERSION}.zip /${BEATNAME:-beats}-dashboards-${VERSION}
echo "Created upload/${BEATNAME:-beats}-dashboards-${VERSION}.zip"

cd upload
sha1sum ${BEATNAME:-beats}-dashboards-${VERSION}.zip | awk '{print $1;}' > ${BEATNAME:-beats}-dashboards-${VERSION}.zip.sha1
echo "Created upload/${BEATNAME:-beats}-dashboards-${VERSION}.zip.sha1"
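Unlike the per-beat templates, this script reads the beat name from the BEATNAME environment variable (passed through by dashboards/build.sh) and falls back to a generic archive name via shell default expansion:

```sh
# ${BEATNAME:-beats} expands to "beats" when BEATNAME is unset or empty.
unset BEATNAME
echo "${BEATNAME:-beats}-dashboards-1.0.0.zip"   # -> beats-dashboards-1.0.0.zip
BEATNAME=icingabeat
echo "${BEATNAME:-beats}-dashboards-1.0.0.zip"   # -> icingabeat-dashboards-1.0.0.zip
```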
11
vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/beatname.sh.j2
generated
vendored
Normal file
@@ -0,0 +1,11 @@
#!/bin/bash

# Script to run {{.beat_name}} in foreground with the same path settings that
# the init script / systemd unit file would do.

/usr/share/{{.beat_name}}/bin/{{.beat_name}} \
    -path.home /usr/share/{{.beat_name}} \
    -path.config /etc/{{.beat_name}} \
    -path.data /var/lib/{{.beat_name}} \
    -path.logs /var/log/{{.beat_name}} \
    $@
Some files were not shown because too many files have changed in this diff.