Update to libbeat 5.3.0
This commit is contained in:
parent
c9110990c6
commit
e3b04a53ad
2
Makefile
2
Makefile
|
@ -1,4 +1,4 @@
|
||||||
BEATNAME=icingabeat
|
BEAT_NAME=icingabeat
|
||||||
BEAT_DIR=github.com/icinga/icingabeat
|
BEAT_DIR=github.com/icinga/icingabeat
|
||||||
BEAT_DESCRIPTION=Icingabeat ships Icinga 2 events and states to Elasticsearch or Logstash.
|
BEAT_DESCRIPTION=Icingabeat ships Icinga 2 events and states to Elasticsearch or Logstash.
|
||||||
BEAT_VENDOR=Icinga
|
BEAT_VENDOR=Icinga
|
||||||
|
|
File diff suppressed because one or more lines are too long
|
@ -175,6 +175,10 @@ output.elasticsearch:
|
||||||
# Optional HTTP Path
|
# Optional HTTP Path
|
||||||
#path: "/elasticsearch"
|
#path: "/elasticsearch"
|
||||||
|
|
||||||
|
# Custom HTTP headers to add to each request
|
||||||
|
#headers:
|
||||||
|
# X-My-Header: Contents of the header
|
||||||
|
|
||||||
# Proxy server url
|
# Proxy server url
|
||||||
#proxy_url: http://proxy:3128
|
#proxy_url: http://proxy:3128
|
||||||
|
|
||||||
|
@ -606,6 +610,45 @@ output.elasticsearch:
|
||||||
# the default for the logs path is a logs subdirectory inside the home path.
|
# the default for the logs path is a logs subdirectory inside the home path.
|
||||||
#path.logs: ${path.home}/logs
|
#path.logs: ${path.home}/logs
|
||||||
|
|
||||||
|
#============================== Dashboards =====================================
|
||||||
|
# These settings control loading the sample dashboards to the Kibana index. Loading
|
||||||
|
# the dashboards is disabled by default and can be enabled either by setting the
|
||||||
|
# options here, or by using the `-setup` CLI flag.
|
||||||
|
#dashboards.enabled: false
|
||||||
|
|
||||||
|
# The URL from where to download the dashboards archive. By default this URL
|
||||||
|
# has a value which is computed based on the Beat name and version. For released
|
||||||
|
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
|
||||||
|
# website.
|
||||||
|
#dashboards.url:
|
||||||
|
|
||||||
|
# The directory from where to read the dashboards. It is used instead of the URL
|
||||||
|
# when it has a value.
|
||||||
|
#dashboards.directory:
|
||||||
|
|
||||||
|
# The file archive (zip file) from where to read the dashboards. It is used instead
|
||||||
|
# of the URL when it has a value.
|
||||||
|
#dashboards.file:
|
||||||
|
|
||||||
|
# If this option is enabled, the snapshot URL is used instead of the default URL.
|
||||||
|
#dashboards.snapshot: false
|
||||||
|
|
||||||
|
# The URL from where to download the snapshot version of the dashboards. By default
|
||||||
|
# this has a value which is computed based on the Beat name and version.
|
||||||
|
#dashboards.snapshot_url
|
||||||
|
|
||||||
|
# In case the archive contains the dashboards from multiple Beats, this lets you
|
||||||
|
# select which one to load. You can load all the dashboards in the archive by
|
||||||
|
# setting this to the empty string.
|
||||||
|
#dashboards.beat: icingabeat
|
||||||
|
|
||||||
|
# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
|
||||||
|
#dashboards.kibana_index: .kibana
|
||||||
|
|
||||||
|
# The Elasticsearch index name. This overwrites the index name defined in the
|
||||||
|
# dashboards and index pattern. Example: testbeat-*
|
||||||
|
#dashboards.index:
|
||||||
|
|
||||||
#================================ Logging ======================================
|
#================================ Logging ======================================
|
||||||
# There are three options for the log output: syslog, file, stderr.
|
# There are three options for the log output: syslog, file, stderr.
|
||||||
# Under Windows systems, the log files are per default sent to the file output,
|
# Under Windows systems, the log files are per default sent to the file output,
|
||||||
|
|
|
@ -7,8 +7,9 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"_meta": {
|
"_meta": {
|
||||||
"version": "1.0.0"
|
"version": "5.3.0"
|
||||||
},
|
},
|
||||||
|
"date_detection": false,
|
||||||
"dynamic_templates": [
|
"dynamic_templates": [
|
||||||
{
|
{
|
||||||
"strings_as_keyword": {
|
"strings_as_keyword": {
|
||||||
|
@ -337,6 +338,9 @@
|
||||||
"expiry": {
|
"expiry": {
|
||||||
"type": "date"
|
"type": "date"
|
||||||
},
|
},
|
||||||
|
"fields": {
|
||||||
|
"properties": {}
|
||||||
|
},
|
||||||
"host": {
|
"host": {
|
||||||
"ignore_above": 1024,
|
"ignore_above": 1024,
|
||||||
"index": "not_analyzed",
|
"index": "not_analyzed",
|
||||||
|
|
|
@ -5,8 +5,9 @@
|
||||||
"norms": false
|
"norms": false
|
||||||
},
|
},
|
||||||
"_meta": {
|
"_meta": {
|
||||||
"version": "1.0.0"
|
"version": "5.3.0"
|
||||||
},
|
},
|
||||||
|
"date_detection": false,
|
||||||
"dynamic_templates": [
|
"dynamic_templates": [
|
||||||
{
|
{
|
||||||
"strings_as_keyword": {
|
"strings_as_keyword": {
|
||||||
|
@ -273,6 +274,9 @@
|
||||||
"expiry": {
|
"expiry": {
|
||||||
"type": "date"
|
"type": "date"
|
||||||
},
|
},
|
||||||
|
"fields": {
|
||||||
|
"properties": {}
|
||||||
|
},
|
||||||
"host": {
|
"host": {
|
||||||
"ignore_above": 1024,
|
"ignore_above": 1024,
|
||||||
"type": "keyword"
|
"type": "keyword"
|
||||||
|
|
|
@ -12,6 +12,7 @@
|
||||||
/beats.iml
|
/beats.iml
|
||||||
*.dev.yml
|
*.dev.yml
|
||||||
*.generated.yml
|
*.generated.yml
|
||||||
|
coverage.out
|
||||||
|
|
||||||
# Editor swap files
|
# Editor swap files
|
||||||
*.swp
|
*.swp
|
||||||
|
|
|
@ -74,10 +74,10 @@ matrix:
|
||||||
|
|
||||||
# Generators
|
# Generators
|
||||||
- os: linux
|
- os: linux
|
||||||
env: TARGETS="-C generate/metricbeat/metricset test"
|
env: TARGETS="-C generator/metricbeat test"
|
||||||
go: *go_version
|
go: *go_version
|
||||||
- os: linux
|
- os: linux
|
||||||
env: TARGETS="-C generate/beat test"
|
env: TARGETS="-C generator/beat test"
|
||||||
go: *go_version
|
go: *go_version
|
||||||
|
|
||||||
addons:
|
addons:
|
||||||
|
@ -88,6 +88,8 @@ addons:
|
||||||
- geoip-database
|
- geoip-database
|
||||||
|
|
||||||
before_install:
|
before_install:
|
||||||
|
- umask 022
|
||||||
|
- chmod -R go-w $GOPATH/src/github.com/elastic/beats
|
||||||
# Docker-compose installation
|
# Docker-compose installation
|
||||||
- sudo rm /usr/local/bin/docker-compose || true
|
- sudo rm /usr/local/bin/docker-compose || true
|
||||||
- curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
|
- curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
|
||||||
|
|
|
@ -8,7 +8,7 @@
|
||||||
// Template, add newest changes here
|
// Template, add newest changes here
|
||||||
|
|
||||||
=== Beats version HEAD
|
=== Beats version HEAD
|
||||||
https://github.com/elastic/beats/compare/v5.2.0...master[Check the HEAD diff]
|
https://github.com/elastic/beats/compare/v5.3.0...master[Check the HEAD diff]
|
||||||
|
|
||||||
==== Breaking changes
|
==== Breaking changes
|
||||||
|
|
||||||
|
@ -24,6 +24,7 @@ https://github.com/elastic/beats/compare/v5.2.0...master[Check the HEAD diff]
|
||||||
|
|
||||||
*Winlogbeat*
|
*Winlogbeat*
|
||||||
|
|
||||||
|
|
||||||
==== Bugfixes
|
==== Bugfixes
|
||||||
|
|
||||||
*Affecting all Beats*
|
*Affecting all Beats*
|
||||||
|
@ -34,10 +35,6 @@ https://github.com/elastic/beats/compare/v5.2.0...master[Check the HEAD diff]
|
||||||
|
|
||||||
*Metricbeat*
|
*Metricbeat*
|
||||||
|
|
||||||
- Fix go routine leak in docker module. {pull}3492[3492]
|
|
||||||
- Fix bug docker module hanging when docker container killed. {issue}3610[3610]
|
|
||||||
- Set timeout to period instead of 1s by default as documented.
|
|
||||||
|
|
||||||
*Packetbeat*
|
*Packetbeat*
|
||||||
|
|
||||||
*Winlogbeat*
|
*Winlogbeat*
|
||||||
|
@ -46,12 +43,12 @@ https://github.com/elastic/beats/compare/v5.2.0...master[Check the HEAD diff]
|
||||||
|
|
||||||
*Affecting all Beats*
|
*Affecting all Beats*
|
||||||
|
|
||||||
|
*Filebeat*
|
||||||
|
|
||||||
*Heartbeat*
|
*Heartbeat*
|
||||||
|
|
||||||
*Metricbeat*
|
*Metricbeat*
|
||||||
|
|
||||||
*Filebeat*
|
|
||||||
|
|
||||||
*Packetbeat*
|
*Packetbeat*
|
||||||
|
|
||||||
*Winlogbeat*
|
*Winlogbeat*
|
||||||
|
@ -70,8 +67,117 @@ https://github.com/elastic/beats/compare/v5.2.0...master[Check the HEAD diff]
|
||||||
|
|
||||||
*Winlogbeat*
|
*Winlogbeat*
|
||||||
|
|
||||||
|
==== Knwon Issue
|
||||||
|
|
||||||
|
*Affecting all Beats*
|
||||||
|
|
||||||
|
*Filebeat*
|
||||||
|
|
||||||
|
*Heartbeat*
|
||||||
|
|
||||||
|
*Metricbeat*
|
||||||
|
|
||||||
|
*Packetbeat*
|
||||||
|
|
||||||
|
*Winlogbeat*
|
||||||
|
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
[[release-notes-5.3.0]]
|
||||||
|
=== Beats version 5.3.0
|
||||||
|
https://github.com/elastic/beats/compare/v5.2.2...v5.3.0[View commits]
|
||||||
|
|
||||||
|
==== Breaking changes
|
||||||
|
|
||||||
|
*Affecting all Beats*
|
||||||
|
|
||||||
|
- Configuration files must be owned by the user running the Beat or by root, and they must not be writable by others. {pull}3544[3544] {pull}3689[3689]
|
||||||
|
- Change Beat generator. Use `$GOPATH/src/github.com/elastic/beats/script/generate.py` to generate a beat. {pull}3452[3452]
|
||||||
|
|
||||||
|
*Filebeat*
|
||||||
|
|
||||||
|
- Always use absolute path for event and registry. This can lead to issues when relative paths were used before. {pull}3328[3328]
|
||||||
|
|
||||||
|
*Metricbeat*
|
||||||
|
|
||||||
|
- Linux cgroup metrics are now enabled by default for the system process metricset. The configuration option for the feature was renamed from `cgroups` to `process.cgroups.enabled`. {pull}3519[3519]
|
||||||
|
- Change field names `couchbase.node.couch.*.actual_disk_size.*` to `couchbase.node.couch.*.disk_size.*` {pull}3545[3545]
|
||||||
|
|
||||||
|
==== Bugfixes
|
||||||
|
|
||||||
|
*Affecting all Beats*
|
||||||
|
|
||||||
|
- Add `_id`, `_type`, `_index` and `_score` fields in the generated index pattern. {pull}3282[3282]
|
||||||
|
|
||||||
|
*Filebeat*
|
||||||
|
|
||||||
|
- Fix empty registry file on machine crash. {issue}3537[3537]
|
||||||
|
|
||||||
|
*Metricbeat*
|
||||||
|
|
||||||
|
- Add error handling to system process metricset for when Linux cgroups are missing from the kernel. {pull}3692[3692]
|
||||||
|
- Add labels to the Docker healthcheck metricset output. {pull}3707[3707]
|
||||||
|
|
||||||
|
*Winlogbeat*
|
||||||
|
|
||||||
|
- Fix handling of empty strings in event_data. {pull}3705[3705]
|
||||||
|
|
||||||
|
==== Added
|
||||||
|
|
||||||
|
*Affecting all Beats*
|
||||||
|
|
||||||
|
- Files created by Beats (logs, registry, file output) will have 0600 permissions. {pull}3387[3387].
|
||||||
|
- RPM/deb packages will now install the config file with 0600 permissions. {pull}3382[3382]
|
||||||
|
- Add the option to pass custom HTTP headers to the Elasticsearch output. {pull}3400[3400]
|
||||||
|
- Unify `regexp` and `contains` conditionals, for both to support array of strings and convert numbers to strings if required. {pull}3469[3469]
|
||||||
|
- Add the option to load the sample dashboards during the Beat startup phase. {pull}3506[3506]
|
||||||
|
- Disabled date detection in Elasticsearch index templates. Date fields must be explicitly defined in index templates. {pull}3528[3528]
|
||||||
|
- Using environment variables in the configuration file is now GA, instead of experimental. {pull}3525[3525]
|
||||||
|
|
||||||
|
*Filebeat*
|
||||||
|
|
||||||
|
- Add Filebeat modules for system, apache2, mysql, and nginx. {issue}3159[3159]
|
||||||
|
- Add the `pipeline` config option at the prospector level, for configuring the Ingest Node pipeline ID. {pull}3433[3433]
|
||||||
|
- Update regular expressions used for matching file names or lines (multiline, include/exclude functionality) to new matchers improving performance of simple string matches. {pull}3469[3469]
|
||||||
|
- The `symlinks` and `harverster_limit` settings are now GA, instead of experimental. {pull}3525[3525]
|
||||||
|
- close_timeout is also applied when the output is blocking. {pull}3511[3511]
|
||||||
|
- Improve handling of different path variants on Windows. {pull}3781[3781]
|
||||||
|
|
||||||
|
|
||||||
|
*Metricbeat*
|
||||||
|
|
||||||
|
- Add experimental dbstats metricset to MongoDB module. {pull}3228[3228]
|
||||||
|
- Use persistent, direct connections to the configured nodes for MongoDB module. {pull}3228[3228]
|
||||||
|
- Add dynamic configuration reloading for modules. {pull}3281[3281]
|
||||||
|
- Add docker health metricset {pull}3357[3357]
|
||||||
|
- Add docker image metricset {pull}3467[3467]
|
||||||
|
- System module uses new matchers for white-listing processes. {pull}3469[3469]
|
||||||
|
- Add Beta CEPH module with health metricset. {pull}3311[3311]
|
||||||
|
- Add Beta php_fpm module with pool metricset. {pull}3415[3415]
|
||||||
|
- The Docker, Kafka, and Prometheus modules are now Beta, instead of experimental. {pull}3525[3525]
|
||||||
|
- The HAProxy module is now GA, instead of experimental. {pull}3525[3525]
|
||||||
|
- Add the ability to collect the environment variables from system processes. {pull}3337[3337]
|
||||||
|
|
||||||
|
==== Deprecated
|
||||||
|
|
||||||
|
*Affecting all Beats*
|
||||||
|
|
||||||
|
- Usage of field `_type` is deprecated. It should not be used in queries or dashboards. {pull}3409[3409]
|
||||||
|
|
||||||
|
*Filebeat*
|
||||||
|
|
||||||
|
- The experimental `publish_async` option is now deprecated and is planned to be removed in 6.0. {pull}3525[3525]
|
||||||
|
|
||||||
|
[[release-notes-5.2.2]]
|
||||||
|
=== Beats version 5.2.2
|
||||||
|
https://github.com/elastic/beats/compare/v5.2.1...v5.2.2[View commits]
|
||||||
|
|
||||||
|
*Metricbeat*
|
||||||
|
|
||||||
|
- Fix bug docker module hanging when docker container killed. {issue}3610[3610]
|
||||||
|
- Set timeout to period instead of 1s by default as documented. {pull}3612[3612]
|
||||||
|
|
||||||
[[release-notes-5.2.1]]
|
[[release-notes-5.2.1]]
|
||||||
=== Beats version 5.2.1
|
=== Beats version 5.2.1
|
||||||
https://github.com/elastic/beats/compare/v5.2.0...v5.2.1[View commits]
|
https://github.com/elastic/beats/compare/v5.2.0...v5.2.1[View commits]
|
||||||
|
@ -90,7 +196,6 @@ https://github.com/elastic/beats/compare/v5.2.0...v5.2.1[View commits]
|
||||||
|
|
||||||
- Fix error in the Winlogbeat sample dashboard. {pull}3548[3548]
|
- Fix error in the Winlogbeat sample dashboard. {pull}3548[3548]
|
||||||
|
|
||||||
|
|
||||||
[[release-notes-5.2.0]]
|
[[release-notes-5.2.0]]
|
||||||
=== Beats version 5.2.0
|
=== Beats version 5.2.0
|
||||||
https://github.com/elastic/beats/compare/v5.1.2...v5.2.0[View commits]
|
https://github.com/elastic/beats/compare/v5.1.2...v5.2.0[View commits]
|
||||||
|
@ -167,7 +272,6 @@ https://github.com/elastic/beats/compare/v5.1.1...v5.1.2[View commits]
|
||||||
|
|
||||||
- Fix for "The array bounds are invalid" error when reading large events. {issue}3076[3076]
|
- Fix for "The array bounds are invalid" error when reading large events. {issue}3076[3076]
|
||||||
|
|
||||||
|
|
||||||
[[release-notes-5.1.1]]
|
[[release-notes-5.1.1]]
|
||||||
=== Beats version 5.1.1
|
=== Beats version 5.1.1
|
||||||
https://github.com/elastic/beats/compare/v5.0.2...v5.1.1[View commits]
|
https://github.com/elastic/beats/compare/v5.0.2...v5.1.1[View commits]
|
||||||
|
@ -209,7 +313,7 @@ https://github.com/elastic/beats/compare/v5.0.2...v5.1.1[View commits]
|
||||||
- Add support for MongoDB 3.4 and WiredTiger metrics. {pull}2999[2999]
|
- Add support for MongoDB 3.4 and WiredTiger metrics. {pull}2999[2999]
|
||||||
- Add experimental kafka module with partition metricset. {pull}2969[2969]
|
- Add experimental kafka module with partition metricset. {pull}2969[2969]
|
||||||
- Add raw config option for mysql/status metricset. {pull}3001[3001]
|
- Add raw config option for mysql/status metricset. {pull}3001[3001]
|
||||||
- Add command fileds for mysql/status metricset. {pull}3251[3251]
|
- Add command fields for mysql/status metricset. {pull}3251[3251]
|
||||||
|
|
||||||
*Filebeat*
|
*Filebeat*
|
||||||
|
|
||||||
|
|
|
@ -57,10 +57,12 @@ The location where you clone is important. Please clone under the source
|
||||||
directory of your `GOPATH`. If you don't have `GOPATH` already set, you can
|
directory of your `GOPATH`. If you don't have `GOPATH` already set, you can
|
||||||
simply set it to your home directory (`export GOPATH=$HOME`).
|
simply set it to your home directory (`export GOPATH=$HOME`).
|
||||||
|
|
||||||
$ mkdir -p $GOPATH/src/github.com/elastic
|
$ mkdir -p ${GOPATH}/src/github.com/elastic
|
||||||
$ cd $GOPATH/src/github.com/elastic
|
$ cd ${GOPATH}/src/github.com/elastic
|
||||||
$ git clone https://github.com/elastic/beats.git
|
$ git clone https://github.com/elastic/beats.git
|
||||||
|
|
||||||
|
Note: If you have multiple go paths use `${GOPATH%%:*}`instead of `${GOPATH}`.
|
||||||
|
|
||||||
Then you can compile a particular Beat by using the Makefile. For example, for
|
Then you can compile a particular Beat by using the Makefile. For example, for
|
||||||
Packetbeat:
|
Packetbeat:
|
||||||
|
|
||||||
|
|
|
@ -11,7 +11,7 @@ SNAPSHOT?=yes
|
||||||
.PHONY: testsuite
|
.PHONY: testsuite
|
||||||
testsuite:
|
testsuite:
|
||||||
$(foreach var,$(PROJECTS),$(MAKE) -C $(var) testsuite || exit 1;)
|
$(foreach var,$(PROJECTS),$(MAKE) -C $(var) testsuite || exit 1;)
|
||||||
#$(MAKE) -C generate test
|
#$(MAKE) -C generator test
|
||||||
|
|
||||||
stop-environments:
|
stop-environments:
|
||||||
$(foreach var,$(PROJECTS_ENV),$(MAKE) -C $(var) stop-environment || exit 0;)
|
$(foreach var,$(PROJECTS_ENV),$(MAKE) -C $(var) stop-environment || exit 0;)
|
||||||
|
@ -40,25 +40,24 @@ coverage-report:
|
||||||
|
|
||||||
.PHONY: update
|
.PHONY: update
|
||||||
update:
|
update:
|
||||||
$(MAKE) -C libbeat collect
|
$(foreach var,$(PROJECTS),$(MAKE) -C $(var) update || exit 1;)
|
||||||
$(foreach var,$(BEATS),$(MAKE) -C $(var) update || exit 1;)
|
|
||||||
|
|
||||||
.PHONY: clean
|
.PHONY: clean
|
||||||
clean:
|
clean:
|
||||||
rm -rf build
|
rm -rf build
|
||||||
$(foreach var,$(PROJECTS),$(MAKE) -C $(var) clean || exit 1;)
|
$(foreach var,$(PROJECTS),$(MAKE) -C $(var) clean || exit 1;)
|
||||||
$(MAKE) -C generate clean
|
$(MAKE) -C generator clean
|
||||||
|
|
||||||
# Cleans up the vendor directory from unnecessary files
|
# Cleans up the vendor directory from unnecessary files
|
||||||
# This should always be run after updating the dependencies
|
# This should always be run after updating the dependencies
|
||||||
.PHONY: clean-vendor
|
.PHONY: clean-vendor
|
||||||
clean-vendor:
|
clean-vendor:
|
||||||
sh scripts/clean_vendor.sh
|
sh script/clean_vendor.sh
|
||||||
|
|
||||||
.PHONY: check
|
.PHONY: check
|
||||||
check:
|
check:
|
||||||
$(foreach var,$(PROJECTS),$(MAKE) -C $(var) check || exit 1;)
|
$(foreach var,$(PROJECTS),$(MAKE) -C $(var) check || exit 1;)
|
||||||
# Validate that all updates were commited
|
# Validate that all updates were committed
|
||||||
$(MAKE) update
|
$(MAKE) update
|
||||||
git update-index --refresh
|
git update-index --refresh
|
||||||
git diff-index --exit-code HEAD --
|
git diff-index --exit-code HEAD --
|
||||||
|
@ -96,6 +95,8 @@ package: update beats-dashboards
|
||||||
mkdir -p build/upload/
|
mkdir -p build/upload/
|
||||||
$(foreach var,$(BEATS),cp -r $(var)/build/upload/ build/upload/$(var) || exit 1;)
|
$(foreach var,$(BEATS),cp -r $(var)/build/upload/ build/upload/$(var) || exit 1;)
|
||||||
cp -r build/dashboards-upload build/upload/dashboards
|
cp -r build/dashboards-upload build/upload/dashboards
|
||||||
|
# Run tests on the generated packages.
|
||||||
|
go test ./dev-tools/package_test.go -files "${shell pwd}/build/upload/*/*"
|
||||||
|
|
||||||
# Upload nightly builds to S3
|
# Upload nightly builds to S3
|
||||||
.PHONY: upload-nightlies-s3
|
.PHONY: upload-nightlies-s3
|
||||||
|
@ -115,7 +116,5 @@ upload-release:
|
||||||
aws s3 cp --recursive --acl public-read build/upload s3://download.elasticsearch.org/beats/
|
aws s3 cp --recursive --acl public-read build/upload s3://download.elasticsearch.org/beats/
|
||||||
|
|
||||||
.PHONY: notice
|
.PHONY: notice
|
||||||
notice:
|
notice:
|
||||||
python dev-tools/generate_notice.py .
|
python dev-tools/generate_notice.py .
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -433,6 +433,31 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
github.com/mitchellh/hashstructure
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2016 Mitchell Hashimoto
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
||||||
|
|
||||||
--------------------------------------------------------------------
|
--------------------------------------------------------------------
|
||||||
github.com/mitchellh/mapstructure
|
github.com/mitchellh/mapstructure
|
||||||
--------------------------------------------------------------------
|
--------------------------------------------------------------------
|
||||||
|
@ -968,34 +993,78 @@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
--------------------------------------------------------------------
|
--------------------------------------------------------------------
|
||||||
github.com/mitchellh/hashstructure
|
golang.org/x/crypto
|
||||||
--------------------------------------------------------------------
|
--------------------------------------------------------------------
|
||||||
The MIT License (MIT)
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
Copyright (c) 2016 Mitchell Hashimoto
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
* Redistributions of source code must retain the above copyright
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
notice, this list of conditions and the following disclaimer.
|
||||||
in the Software without restriction, including without limitation the rights
|
* Redistributions in binary form must reproduce the above
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
in the documentation and/or other materials provided with the
|
||||||
furnished to do so, subject to the following conditions:
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
all copies or substantial portions of the Software.
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
github.com/docker/docker
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
Apache License
|
||||||
|
|
||||||
|
-------NOTICE-----
|
||||||
|
Docker
|
||||||
|
Copyright 2012-2016 Docker, Inc.
|
||||||
|
|
||||||
|
This product includes software developed at Docker, Inc. (https://www.docker.com).
|
||||||
|
|
||||||
|
This product contains software (https://github.com/kr/pty) developed
|
||||||
|
by Keith Rarick, licensed under the MIT License.
|
||||||
|
|
||||||
|
The following is courtesy of our legal counsel:
|
||||||
|
|
||||||
|
|
||||||
|
Use and transfer of Docker may be subject to certain restrictions by the
|
||||||
|
United States and other governments.
|
||||||
|
It is your responsibility to ensure that your use and/or transfer does not
|
||||||
|
violate applicable laws.
|
||||||
|
|
||||||
|
For more information, please see https://www.bis.doc.gov
|
||||||
|
|
||||||
|
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
github.com/docker/go-connections
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
Apache License
|
||||||
|
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
github.com/docker/go-units
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
Apache License
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
||||||
THE SOFTWARE.
|
|
||||||
|
|
||||||
--------------------------------------------------------------------
|
--------------------------------------------------------------------
|
||||||
github.com/fsouza/go-dockerclient
|
github.com/fsouza/go-dockerclient
|
||||||
--------------------------------------------------------------------
|
--------------------------------------------------------------------
|
||||||
Copyright (c) 2016, go-dockerclient authors
|
Copyright (c) 2013-2017, go-dockerclient authors
|
||||||
All rights reserved.
|
All rights reserved.
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
@ -1018,6 +1087,448 @@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
github.com/hashicorp/go-cleanhttp
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
Mozilla Public License, version 2.0
|
||||||
|
|
||||||
|
1. Definitions
|
||||||
|
|
||||||
|
1.1. "Contributor"
|
||||||
|
|
||||||
|
means each individual or legal entity that creates, contributes to the
|
||||||
|
creation of, or owns Covered Software.
|
||||||
|
|
||||||
|
1.2. "Contributor Version"
|
||||||
|
|
||||||
|
means the combination of the Contributions of others (if any) used by a
|
||||||
|
Contributor and that particular Contributor's Contribution.
|
||||||
|
|
||||||
|
1.3. "Contribution"
|
||||||
|
|
||||||
|
means Covered Software of a particular Contributor.
|
||||||
|
|
||||||
|
1.4. "Covered Software"
|
||||||
|
|
||||||
|
means Source Code Form to which the initial Contributor has attached the
|
||||||
|
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||||
|
Modifications of such Source Code Form, in each case including portions
|
||||||
|
thereof.
|
||||||
|
|
||||||
|
1.5. "Incompatible With Secondary Licenses"
|
||||||
|
means
|
||||||
|
|
||||||
|
a. that the initial Contributor has attached the notice described in
|
||||||
|
Exhibit B to the Covered Software; or
|
||||||
|
|
||||||
|
b. that the Covered Software was made available under the terms of
|
||||||
|
version 1.1 or earlier of the License, but not also under the terms of
|
||||||
|
a Secondary License.
|
||||||
|
|
||||||
|
1.6. "Executable Form"
|
||||||
|
|
||||||
|
means any form of the work other than Source Code Form.
|
||||||
|
|
||||||
|
1.7. "Larger Work"
|
||||||
|
|
||||||
|
means a work that combines Covered Software with other material, in a
|
||||||
|
separate file or files, that is not Covered Software.
|
||||||
|
|
||||||
|
1.8. "License"
|
||||||
|
|
||||||
|
means this document.
|
||||||
|
|
||||||
|
1.9. "Licensable"
|
||||||
|
|
||||||
|
means having the right to grant, to the maximum extent possible, whether
|
||||||
|
at the time of the initial grant or subsequently, any and all of the
|
||||||
|
rights conveyed by this License.
|
||||||
|
|
||||||
|
1.10. "Modifications"
|
||||||
|
|
||||||
|
means any of the following:
|
||||||
|
|
||||||
|
a. any file in Source Code Form that results from an addition to,
|
||||||
|
deletion from, or modification of the contents of Covered Software; or
|
||||||
|
|
||||||
|
b. any new file in Source Code Form that contains any Covered Software.
|
||||||
|
|
||||||
|
1.11. "Patent Claims" of a Contributor
|
||||||
|
|
||||||
|
means any patent claim(s), including without limitation, method,
|
||||||
|
process, and apparatus claims, in any patent Licensable by such
|
||||||
|
Contributor that would be infringed, but for the grant of the License,
|
||||||
|
by the making, using, selling, offering for sale, having made, import,
|
||||||
|
or transfer of either its Contributions or its Contributor Version.
|
||||||
|
|
||||||
|
1.12. "Secondary License"
|
||||||
|
|
||||||
|
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||||
|
General Public License, Version 2.1, the GNU Affero General Public
|
||||||
|
License, Version 3.0, or any later versions of those licenses.
|
||||||
|
|
||||||
|
1.13. "Source Code Form"
|
||||||
|
|
||||||
|
means the form of the work preferred for making modifications.
|
||||||
|
|
||||||
|
1.14. "You" (or "Your")
|
||||||
|
|
||||||
|
means an individual or a legal entity exercising rights under this
|
||||||
|
License. For legal entities, "You" includes any entity that controls, is
|
||||||
|
controlled by, or is under common control with You. For purposes of this
|
||||||
|
definition, "control" means (a) the power, direct or indirect, to cause
|
||||||
|
the direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||||
|
outstanding shares or beneficial ownership of such entity.
|
||||||
|
|
||||||
|
|
||||||
|
2. License Grants and Conditions
|
||||||
|
|
||||||
|
2.1. Grants
|
||||||
|
|
||||||
|
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||||
|
non-exclusive license:
|
||||||
|
|
||||||
|
a. under intellectual property rights (other than patent or trademark)
|
||||||
|
Licensable by such Contributor to use, reproduce, make available,
|
||||||
|
modify, display, perform, distribute, and otherwise exploit its
|
||||||
|
Contributions, either on an unmodified basis, with Modifications, or
|
||||||
|
as part of a Larger Work; and
|
||||||
|
|
||||||
|
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||||
|
sale, have made, import, and otherwise transfer either its
|
||||||
|
Contributions or its Contributor Version.
|
||||||
|
|
||||||
|
2.2. Effective Date
|
||||||
|
|
||||||
|
The licenses granted in Section 2.1 with respect to any Contribution
|
||||||
|
become effective for each Contribution on the date the Contributor first
|
||||||
|
distributes such Contribution.
|
||||||
|
|
||||||
|
2.3. Limitations on Grant Scope
|
||||||
|
|
||||||
|
The licenses granted in this Section 2 are the only rights granted under
|
||||||
|
this License. No additional rights or licenses will be implied from the
|
||||||
|
distribution or licensing of Covered Software under this License.
|
||||||
|
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||||
|
Contributor:
|
||||||
|
|
||||||
|
a. for any code that a Contributor has removed from Covered Software; or
|
||||||
|
|
||||||
|
b. for infringements caused by: (i) Your and any other third party's
|
||||||
|
modifications of Covered Software, or (ii) the combination of its
|
||||||
|
Contributions with other software (except as part of its Contributor
|
||||||
|
Version); or
|
||||||
|
|
||||||
|
c. under Patent Claims infringed by Covered Software in the absence of
|
||||||
|
its Contributions.
|
||||||
|
|
||||||
|
This License does not grant any rights in the trademarks, service marks,
|
||||||
|
or logos of any Contributor (except as may be necessary to comply with
|
||||||
|
the notice requirements in Section 3.4).
|
||||||
|
|
||||||
|
2.4. Subsequent Licenses
|
||||||
|
|
||||||
|
No Contributor makes additional grants as a result of Your choice to
|
||||||
|
distribute the Covered Software under a subsequent version of this
|
||||||
|
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||||
|
permitted under the terms of Section 3.3).
|
||||||
|
|
||||||
|
2.5. Representation
|
||||||
|
|
||||||
|
Each Contributor represents that the Contributor believes its
|
||||||
|
Contributions are its original creation(s) or it has sufficient rights to
|
||||||
|
grant the rights to its Contributions conveyed by this License.
|
||||||
|
|
||||||
|
2.6. Fair Use
|
||||||
|
|
||||||
|
This License is not intended to limit any rights You have under
|
||||||
|
applicable copyright doctrines of fair use, fair dealing, or other
|
||||||
|
equivalents.
|
||||||
|
|
||||||
|
2.7. Conditions
|
||||||
|
|
||||||
|
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||||
|
Section 2.1.
|
||||||
|
|
||||||
|
|
||||||
|
3. Responsibilities
|
||||||
|
|
||||||
|
3.1. Distribution of Source Form
|
||||||
|
|
||||||
|
All distribution of Covered Software in Source Code Form, including any
|
||||||
|
Modifications that You create or to which You contribute, must be under
|
||||||
|
the terms of this License. You must inform recipients that the Source
|
||||||
|
Code Form of the Covered Software is governed by the terms of this
|
||||||
|
License, and how they can obtain a copy of this License. You may not
|
||||||
|
attempt to alter or restrict the recipients' rights in the Source Code
|
||||||
|
Form.
|
||||||
|
|
||||||
|
3.2. Distribution of Executable Form
|
||||||
|
|
||||||
|
If You distribute Covered Software in Executable Form then:
|
||||||
|
|
||||||
|
a. such Covered Software must also be made available in Source Code Form,
|
||||||
|
as described in Section 3.1, and You must inform recipients of the
|
||||||
|
Executable Form how they can obtain a copy of such Source Code Form by
|
||||||
|
reasonable means in a timely manner, at a charge no more than the cost
|
||||||
|
of distribution to the recipient; and
|
||||||
|
|
||||||
|
b. You may distribute such Executable Form under the terms of this
|
||||||
|
License, or sublicense it under different terms, provided that the
|
||||||
|
license for the Executable Form does not attempt to limit or alter the
|
||||||
|
recipients' rights in the Source Code Form under this License.
|
||||||
|
|
||||||
|
3.3. Distribution of a Larger Work
|
||||||
|
|
||||||
|
You may create and distribute a Larger Work under terms of Your choice,
|
||||||
|
provided that You also comply with the requirements of this License for
|
||||||
|
the Covered Software. If the Larger Work is a combination of Covered
|
||||||
|
Software with a work governed by one or more Secondary Licenses, and the
|
||||||
|
Covered Software is not Incompatible With Secondary Licenses, this
|
||||||
|
License permits You to additionally distribute such Covered Software
|
||||||
|
under the terms of such Secondary License(s), so that the recipient of
|
||||||
|
the Larger Work may, at their option, further distribute the Covered
|
||||||
|
Software under the terms of either this License or such Secondary
|
||||||
|
License(s).
|
||||||
|
|
||||||
|
3.4. Notices
|
||||||
|
|
||||||
|
You may not remove or alter the substance of any license notices
|
||||||
|
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||||
|
limitations of liability) contained within the Source Code Form of the
|
||||||
|
Covered Software, except that You may alter any license notices to the
|
||||||
|
extent required to remedy known factual inaccuracies.
|
||||||
|
|
||||||
|
3.5. Application of Additional Terms
|
||||||
|
|
||||||
|
You may choose to offer, and to charge a fee for, warranty, support,
|
||||||
|
indemnity or liability obligations to one or more recipients of Covered
|
||||||
|
Software. However, You may do so only on Your own behalf, and not on
|
||||||
|
behalf of any Contributor. You must make it absolutely clear that any
|
||||||
|
such warranty, support, indemnity, or liability obligation is offered by
|
||||||
|
You alone, and You hereby agree to indemnify every Contributor for any
|
||||||
|
liability incurred by such Contributor as a result of warranty, support,
|
||||||
|
indemnity or liability terms You offer. You may include additional
|
||||||
|
disclaimers of warranty and limitations of liability specific to any
|
||||||
|
jurisdiction.
|
||||||
|
|
||||||
|
4. Inability to Comply Due to Statute or Regulation
|
||||||
|
|
||||||
|
If it is impossible for You to comply with any of the terms of this License
|
||||||
|
with respect to some or all of the Covered Software due to statute,
|
||||||
|
judicial order, or regulation then You must: (a) comply with the terms of
|
||||||
|
this License to the maximum extent possible; and (b) describe the
|
||||||
|
limitations and the code they affect. Such description must be placed in a
|
||||||
|
text file included with all distributions of the Covered Software under
|
||||||
|
this License. Except to the extent prohibited by statute or regulation,
|
||||||
|
such description must be sufficiently detailed for a recipient of ordinary
|
||||||
|
skill to be able to understand it.
|
||||||
|
|
||||||
|
5. Termination
|
||||||
|
|
||||||
|
5.1. The rights granted under this License will terminate automatically if You
|
||||||
|
fail to comply with any of its terms. However, if You become compliant,
|
||||||
|
then the rights granted under this License from a particular Contributor
|
||||||
|
are reinstated (a) provisionally, unless and until such Contributor
|
||||||
|
explicitly and finally terminates Your grants, and (b) on an ongoing
|
||||||
|
basis, if such Contributor fails to notify You of the non-compliance by
|
||||||
|
some reasonable means prior to 60 days after You have come back into
|
||||||
|
compliance. Moreover, Your grants from a particular Contributor are
|
||||||
|
reinstated on an ongoing basis if such Contributor notifies You of the
|
||||||
|
non-compliance by some reasonable means, this is the first time You have
|
||||||
|
received notice of non-compliance with this License from such
|
||||||
|
Contributor, and You become compliant prior to 30 days after Your receipt
|
||||||
|
of the notice.
|
||||||
|
|
||||||
|
5.2. If You initiate litigation against any entity by asserting a patent
|
||||||
|
infringement claim (excluding declaratory judgment actions,
|
||||||
|
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||||
|
directly or indirectly infringes any patent, then the rights granted to
|
||||||
|
You by any and all Contributors for the Covered Software under Section
|
||||||
|
2.1 of this License shall terminate.
|
||||||
|
|
||||||
|
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||||
|
license agreements (excluding distributors and resellers) which have been
|
||||||
|
validly granted by You or Your distributors under this License prior to
|
||||||
|
termination shall survive termination.
|
||||||
|
|
||||||
|
6. Disclaimer of Warranty
|
||||||
|
|
||||||
|
Covered Software is provided under this License on an "as is" basis,
|
||||||
|
without warranty of any kind, either expressed, implied, or statutory,
|
||||||
|
including, without limitation, warranties that the Covered Software is free
|
||||||
|
of defects, merchantable, fit for a particular purpose or non-infringing.
|
||||||
|
The entire risk as to the quality and performance of the Covered Software
|
||||||
|
is with You. Should any Covered Software prove defective in any respect,
|
||||||
|
You (not any Contributor) assume the cost of any necessary servicing,
|
||||||
|
repair, or correction. This disclaimer of warranty constitutes an essential
|
||||||
|
part of this License. No use of any Covered Software is authorized under
|
||||||
|
this License except under this disclaimer.
|
||||||
|
|
||||||
|
7. Limitation of Liability
|
||||||
|
|
||||||
|
Under no circumstances and under no legal theory, whether tort (including
|
||||||
|
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||||
|
distributes Covered Software as permitted above, be liable to You for any
|
||||||
|
direct, indirect, special, incidental, or consequential damages of any
|
||||||
|
character including, without limitation, damages for lost profits, loss of
|
||||||
|
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses, even if such party shall have been
|
||||||
|
informed of the possibility of such damages. This limitation of liability
|
||||||
|
shall not apply to liability for death or personal injury resulting from
|
||||||
|
such party's negligence to the extent applicable law prohibits such
|
||||||
|
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
||||||
|
incidental or consequential damages, so this exclusion and limitation may
|
||||||
|
not apply to You.
|
||||||
|
|
||||||
|
8. Litigation
|
||||||
|
|
||||||
|
Any litigation relating to this License may be brought only in the courts
|
||||||
|
of a jurisdiction where the defendant maintains its principal place of
|
||||||
|
business and such litigation shall be governed by laws of that
|
||||||
|
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
||||||
|
in this Section shall prevent a party's ability to bring cross-claims or
|
||||||
|
counter-claims.
|
||||||
|
|
||||||
|
9. Miscellaneous
|
||||||
|
|
||||||
|
This License represents the complete agreement concerning the subject
|
||||||
|
matter hereof. If any provision of this License is held to be
|
||||||
|
unenforceable, such provision shall be reformed only to the extent
|
||||||
|
necessary to make it enforceable. Any law or regulation which provides that
|
||||||
|
the language of a contract shall be construed against the drafter shall not
|
||||||
|
be used to construe this License against a Contributor.
|
||||||
|
|
||||||
|
|
||||||
|
10. Versions of the License
|
||||||
|
|
||||||
|
10.1. New Versions
|
||||||
|
|
||||||
|
Mozilla Foundation is the license steward. Except as provided in Section
|
||||||
|
10.3, no one other than the license steward has the right to modify or
|
||||||
|
publish new versions of this License. Each version will be given a
|
||||||
|
distinguishing version number.
|
||||||
|
|
||||||
|
10.2. Effect of New Versions
|
||||||
|
|
||||||
|
You may distribute the Covered Software under the terms of the version
|
||||||
|
of the License under which You originally received the Covered Software,
|
||||||
|
or under the terms of any subsequent version published by the license
|
||||||
|
steward.
|
||||||
|
|
||||||
|
10.3. Modified Versions
|
||||||
|
|
||||||
|
If you create software not governed by this License, and you want to
|
||||||
|
create a new license for such software, you may create and use a
|
||||||
|
modified version of this License if you rename the license and remove
|
||||||
|
any references to the name of the license steward (except to note that
|
||||||
|
such modified license differs from this License).
|
||||||
|
|
||||||
|
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||||
|
Licenses If You choose to distribute Source Code Form that is
|
||||||
|
Incompatible With Secondary Licenses under the terms of this version of
|
||||||
|
the License, the notice described in Exhibit B of this License must be
|
||||||
|
attached.
|
||||||
|
|
||||||
|
Exhibit A - Source Code Form License Notice
|
||||||
|
|
||||||
|
This Source Code Form is subject to the
|
||||||
|
terms of the Mozilla Public License, v.
|
||||||
|
2.0. If a copy of the MPL was not
|
||||||
|
distributed with this file, You can
|
||||||
|
obtain one at
|
||||||
|
http://mozilla.org/MPL/2.0/.
|
||||||
|
|
||||||
|
If it is not possible or desirable to put the notice in a particular file,
|
||||||
|
then You may include the notice in a location (such as a LICENSE file in a
|
||||||
|
relevant directory) where a recipient would be likely to look for such a
|
||||||
|
notice.
|
||||||
|
|
||||||
|
You may add additional accurate notices of copyright ownership.
|
||||||
|
|
||||||
|
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||||
|
|
||||||
|
This Source Code Form is "Incompatible
|
||||||
|
With Secondary Licenses", as defined by
|
||||||
|
the Mozilla Public License, v. 2.0.
|
||||||
|
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
github.com/Microsoft/go-winio
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2015 Microsoft
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
github.com/opencontainers/runc
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
Apache License
|
||||||
|
|
||||||
|
-------NOTICE-----
|
||||||
|
runc
|
||||||
|
|
||||||
|
Copyright 2012-2015 Docker, Inc.
|
||||||
|
|
||||||
|
This product includes software developed at Docker, Inc. (http://www.docker.com).
|
||||||
|
|
||||||
|
The following is courtesy of our legal counsel:
|
||||||
|
|
||||||
|
|
||||||
|
Use and transfer of Docker may be subject to certain restrictions by the
|
||||||
|
United States and other governments.
|
||||||
|
It is your responsibility to ensure that your use and/or transfer does not
|
||||||
|
violate applicable laws.
|
||||||
|
|
||||||
|
For more information, please see http://www.bis.doc.gov
|
||||||
|
|
||||||
|
See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
github.com/Sirupsen/logrus
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2014 Simon Eskildsen
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
||||||
|
|
||||||
--------------------------------------------------------------------
|
--------------------------------------------------------------------
|
||||||
github.com/go-sql-driver/mysql
|
github.com/go-sql-driver/mysql
|
||||||
--------------------------------------------------------------------
|
--------------------------------------------------------------------
|
||||||
|
|
|
@ -21,6 +21,7 @@ framework for creating Beats, and all the officially supported Beats:
|
||||||
Beat | Description
|
Beat | Description
|
||||||
--- | ---
|
--- | ---
|
||||||
[Filebeat](https://github.com/elastic/beats/tree/master/filebeat) | Tails and ships log files
|
[Filebeat](https://github.com/elastic/beats/tree/master/filebeat) | Tails and ships log files
|
||||||
|
[Heartbeat](https://github.com/elastic/beats/tree/master/heartbeat) | Ping remote services for availability
|
||||||
[Metricbeat](https://github.com/elastic/beats/tree/master/metricbeat) | Fetches sets of metrics from the operating system and services
|
[Metricbeat](https://github.com/elastic/beats/tree/master/metricbeat) | Fetches sets of metrics from the operating system and services
|
||||||
[Packetbeat](https://github.com/elastic/beats/tree/master/packetbeat) | Monitors the network and applications by sniffing packets
|
[Packetbeat](https://github.com/elastic/beats/tree/master/packetbeat) | Monitors the network and applications by sniffing packets
|
||||||
[Winlogbeat](https://github.com/elastic/beats/tree/master/winlogbeat) | Fetches and ships Windows Event logs
|
[Winlogbeat](https://github.com/elastic/beats/tree/master/winlogbeat) | Fetches and ships Windows Event logs
|
||||||
|
@ -38,6 +39,7 @@ on the [elastic.co site](https://www.elastic.co/guide/):
|
||||||
|
|
||||||
* [Beats platform](https://www.elastic.co/guide/en/beats/libbeat/current/index.html)
|
* [Beats platform](https://www.elastic.co/guide/en/beats/libbeat/current/index.html)
|
||||||
* [Filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/index.html)
|
* [Filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/index.html)
|
||||||
|
* [Heartbeat](https://www.elastic.co/guide/en/beats/heartbeat/current/index.html)
|
||||||
* [Metricbeat](https://www.elastic.co/guide/en/beats/metricbeat/current/index.html)
|
* [Metricbeat](https://www.elastic.co/guide/en/beats/metricbeat/current/index.html)
|
||||||
* [Packetbeat](https://www.elastic.co/guide/en/beats/packetbeat/current/index.html)
|
* [Packetbeat](https://www.elastic.co/guide/en/beats/packetbeat/current/index.html)
|
||||||
* [Winlogbeat](https://www.elastic.co/guide/en/beats/winlogbeat/current/index.html)
|
* [Winlogbeat](https://www.elastic.co/guide/en/beats/winlogbeat/current/index.html)
|
||||||
|
|
|
@ -9,7 +9,7 @@
|
||||||
# This box is used as a Windows development and testing environment for Beats.
|
# This box is used as a Windows development and testing environment for Beats.
|
||||||
#
|
#
|
||||||
# Usage and Features:
|
# Usage and Features:
|
||||||
# - Two users exist: Administartor and Vagrant. Both have the password: vagrant
|
# - Two users exist: Administrator and Vagrant. Both have the password: vagrant
|
||||||
# - Use 'vagrant ssh' to open a Windows command prompt.
|
# - Use 'vagrant ssh' to open a Windows command prompt.
|
||||||
# - Use 'vagrant rdp' to open a Windows Remote Deskop session. Mac users must
|
# - Use 'vagrant rdp' to open a Windows Remote Deskop session. Mac users must
|
||||||
# install the Microsoft Remote Desktop Client from the App Store.
|
# install the Microsoft Remote Desktop Client from the App Store.
|
||||||
|
|
|
@ -44,7 +44,7 @@ pip install -r requirements.txt
|
||||||
|
|
||||||
This creates the environment that contains all the python packages required to
|
This creates the environment that contains all the python packages required to
|
||||||
run the `export_dashboards.py` script. Thus, for the next runs you just need
|
run the `export_dashboards.py` script. Thus, for the next runs you just need
|
||||||
to enable the enviroment:
|
to enable the environment:
|
||||||
|
|
||||||
```
|
```
|
||||||
. env/bin/activate
|
. env/bin/activate
|
||||||
|
|
|
@ -8,6 +8,7 @@ import sys
|
||||||
import argparse
|
import argparse
|
||||||
import fnmatch
|
import fnmatch
|
||||||
|
|
||||||
|
|
||||||
def main(arguments):
|
def main(arguments):
|
||||||
|
|
||||||
parser = argparse.ArgumentParser(description=__doc__,
|
parser = argparse.ArgumentParser(description=__doc__,
|
||||||
|
@ -34,7 +35,7 @@ def main(arguments):
|
||||||
if not line.startswith('mode:') and "vendor" not in line:
|
if not line.startswith('mode:') and "vendor" not in line:
|
||||||
(position, stmt, count) = line.split(" ")
|
(position, stmt, count) = line.split(" ")
|
||||||
stmt = int(stmt)
|
stmt = int(stmt)
|
||||||
count = int (count)
|
count = int(count)
|
||||||
prev_count = 0
|
prev_count = 0
|
||||||
if lines.has_key(position):
|
if lines.has_key(position):
|
||||||
(_, prev_stmt, prev_count) = lines[position]
|
(_, prev_stmt, prev_count) = lines[position]
|
||||||
|
|
|
@ -49,7 +49,7 @@ def main():
|
||||||
|
|
||||||
print args
|
print args
|
||||||
|
|
||||||
tmp_branch = "backport_{}".format(args.pr_number)
|
tmp_branch = "backport_{}_{}".format(args.pr_number, args.to_branch)
|
||||||
|
|
||||||
if not vars(args)["continue"]:
|
if not vars(args)["continue"]:
|
||||||
if not args.yes and raw_input("This will destroy all local changes. " +
|
if not args.yes and raw_input("This will destroy all local changes. " +
|
||||||
|
|
BIN
vendor/github.com/elastic/beats/dev-tools/cmd/import_dashboards/import_dashboards
generated
vendored
Executable file
BIN
vendor/github.com/elastic/beats/dev-tools/cmd/import_dashboards/import_dashboards
generated
vendored
Executable file
Binary file not shown.
|
@ -49,7 +49,6 @@ def add_licenses(f, licenses):
|
||||||
f.write(read_file(notice_file))
|
f.write(read_file(notice_file))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def create_notice(filename, beat, copyright, licenses):
|
def create_notice(filename, beat, copyright, licenses):
|
||||||
|
|
||||||
now = datetime.datetime.now()
|
now = datetime.datetime.now()
|
||||||
|
|
|
@ -0,0 +1,18 @@
|
||||||
|
package: github.com/elastic/beats/dev-tools
|
||||||
|
import: []
|
||||||
|
testImports:
|
||||||
|
- name: github.com/blakesmith/ar
|
||||||
|
version: 8bd4349a67f2533b078dbc524689d15dba0f4659
|
||||||
|
- name: github.com/cavaliercoder/go-rpm
|
||||||
|
version: 9664735b838ea0a81e4aace3197ebe0d4040f952
|
||||||
|
- name: golang.org/x/crypto
|
||||||
|
version: 2f8be38b9a7533b8763d48273737ff6e90428a96
|
||||||
|
subpackages:
|
||||||
|
- cast5
|
||||||
|
- openpgp
|
||||||
|
- openpgp/armor
|
||||||
|
- openpgp/elgamal
|
||||||
|
- openpgp/errors
|
||||||
|
- openpgp/packet
|
||||||
|
- openpgp/s2k
|
||||||
|
|
|
@ -131,6 +131,7 @@ main() {
|
||||||
err "--build and --cleanup cannot be used together"
|
err "--build and --cleanup cannot be used together"
|
||||||
exit 1
|
exit 1
|
||||||
elif [ "$BUILD" == "true" ]; then
|
elif [ "$BUILD" == "true" ]; then
|
||||||
|
chmod -R go-w "${GOPATH}/src/github.com/elastic/beats"
|
||||||
build
|
build
|
||||||
elif [ "$CLEANUP" == "true" ]; then
|
elif [ "$CLEANUP" == "true" ]; then
|
||||||
cleanup
|
cleanup
|
||||||
|
@ -140,4 +141,5 @@ main() {
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
umask 022
|
||||||
main $*
|
main $*
|
||||||
|
|
|
@ -0,0 +1,289 @@
|
||||||
|
package dev_tools
|
||||||
|
|
||||||
|
// This file contains tests that can be run on the generated packages.
|
||||||
|
// To run these tests use `go test package_test.go`.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"archive/zip"
|
||||||
|
"bytes"
|
||||||
|
"compress/gzip"
|
||||||
|
"flag"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/blakesmith/ar"
|
||||||
|
"github.com/cavaliercoder/go-rpm"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
expectedConfigMode = os.FileMode(0600)
|
||||||
|
expectedConfigUID = 0
|
||||||
|
expectedConfigGID = 0
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
configFilePattern = regexp.MustCompile(`.*beat\.yml`)
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
files = flag.String("files", "../build/upload/*/*", "filepath glob containing package files")
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRPM(t *testing.T) {
|
||||||
|
rpms := getFiles(t, regexp.MustCompile(`\.rpm$`))
|
||||||
|
for _, rpm := range rpms {
|
||||||
|
checkRPM(t, rpm)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDeb(t *testing.T) {
|
||||||
|
debs := getFiles(t, regexp.MustCompile(`\.deb$`))
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
for _, deb := range debs {
|
||||||
|
checkDeb(t, deb, buf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTar(t *testing.T) {
|
||||||
|
tars := getFiles(t, regexp.MustCompile(`\.tar\.gz$`))
|
||||||
|
for _, tar := range tars {
|
||||||
|
checkTar(t, tar)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestZip(t *testing.T) {
|
||||||
|
zips := getFiles(t, regexp.MustCompile(`^\w+beat-\S+.zip$`))
|
||||||
|
for _, zip := range zips {
|
||||||
|
checkZip(t, zip)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sub-tests
|
||||||
|
|
||||||
|
func checkRPM(t *testing.T, file string) {
|
||||||
|
p, err := readRPM(file)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
checkConfigPermissions(t, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkDeb(t *testing.T, file string, buf *bytes.Buffer) {
|
||||||
|
p, err := readDeb(file, buf)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
checkConfigPermissions(t, p)
|
||||||
|
checkConfigOwner(t, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkTar(t *testing.T, file string) {
|
||||||
|
p, err := readTar(file)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
checkConfigPermissions(t, p)
|
||||||
|
checkConfigOwner(t, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkZip(t *testing.T, file string) {
|
||||||
|
p, err := readZip(file)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
checkConfigPermissions(t, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify that the main configuration file is installed with a 0600 file mode.
|
||||||
|
func checkConfigPermissions(t *testing.T, p *packageFile) {
|
||||||
|
t.Run(p.Name+" config file permissions", func(t *testing.T) {
|
||||||
|
for _, entry := range p.Contents {
|
||||||
|
if configFilePattern.MatchString(entry.File) {
|
||||||
|
mode := entry.Mode.Perm()
|
||||||
|
if expectedConfigMode != mode {
|
||||||
|
t.Errorf("file %v has wrong permissions: expected=%v actual=%v",
|
||||||
|
entry.Mode, expectedConfigMode, mode)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
t.Errorf("no config file found matching %v", configFilePattern)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkConfigOwner(t *testing.T, p *packageFile) {
|
||||||
|
t.Run(p.Name+" config file owner", func(t *testing.T) {
|
||||||
|
for _, entry := range p.Contents {
|
||||||
|
if configFilePattern.MatchString(entry.File) {
|
||||||
|
if expectedConfigUID != entry.UID {
|
||||||
|
t.Errorf("file %v should be owned by user %v, owner=%v", entry.File, expectedConfigGID, entry.UID)
|
||||||
|
}
|
||||||
|
if expectedConfigGID != entry.GID {
|
||||||
|
t.Errorf("file %v should be owned by group %v, group=%v", entry.File, expectedConfigGID, entry.GID)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
t.Errorf("no config file found matching %v", configFilePattern)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Helpers
|
||||||
|
|
||||||
|
// packageFile is a format-independent description of one package archive
// (RPM, deb, tar.gz, or zip) built by the read* helpers below.
type packageFile struct {
	Name string // base name of the package file on disk
	Contents map[string]packageEntry // archive entries keyed by their path inside the package
}
|
||||||
|
|
||||||
|
// packageEntry is the metadata recorded for a single file inside a
// package archive. UID/GID are only populated by formats that carry
// ownership information (deb data.tar.gz and tarballs).
type packageEntry struct {
	File string // path of the entry inside the archive
	UID int // owning user id (0 when the format has no ownership data)
	GID int // owning group id (0 when the format has no ownership data)
	Mode os.FileMode // permission bits of the entry
}
|
||||||
|
|
||||||
|
func getFiles(t *testing.T, pattern *regexp.Regexp) []string {
|
||||||
|
matches, err := filepath.Glob(*files)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
files := matches[:0]
|
||||||
|
for _, f := range matches {
|
||||||
|
if pattern.MatchString(filepath.Base(f)) {
|
||||||
|
files = append(files, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return files
|
||||||
|
}
|
||||||
|
|
||||||
|
func readRPM(rpmFile string) (*packageFile, error) {
|
||||||
|
p, err := rpm.OpenPackageFile(rpmFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
contents := p.Files()
|
||||||
|
pf := &packageFile{Name: filepath.Base(rpmFile), Contents: map[string]packageEntry{}}
|
||||||
|
|
||||||
|
for _, file := range contents {
|
||||||
|
pf.Contents[file.Name()] = packageEntry{
|
||||||
|
File: file.Name(),
|
||||||
|
Mode: file.Mode(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return pf, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readDeb reads the data.tar.gz file from the .deb.
// The .deb is an ar archive; this walks its members until it finds the
// data payload, buffers it into dataBuffer, and parses it as a gzipped
// tar. If no data.tar.gz member exists, io.EOF is returned as a sentinel.
func readDeb(debFile string, dataBuffer *bytes.Buffer) (*packageFile, error) {
	file, err := os.Open(debFile)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	arReader := ar.NewReader(file)
	for {
		header, err := arReader.Next()
		if err != nil {
			// EOF means we walked the whole archive without finding the payload.
			if err == io.EOF {
				break
			}
			return nil, err
		}

		// HasPrefix rather than an exact match: ar member names may carry
		// trailing padding/terminator characters after "data.tar.gz".
		if strings.HasPrefix(header.Name, "data.tar.gz") {
			dataBuffer.Reset()
			_, err := io.Copy(dataBuffer, arReader)
			if err != nil {
				return nil, err
			}

			gz, err := gzip.NewReader(dataBuffer)
			if err != nil {
				return nil, err
			}
			// This defer is safe despite being inside the loop: the function
			// returns on the very next statement.
			defer gz.Close()

			return readTarContents(filepath.Base(debFile), gz)
		}
	}

	// Sentinel: no data.tar.gz member was present in the .deb.
	return nil, io.EOF
}
|
||||||
|
|
||||||
|
func readTar(tarFile string) (*packageFile, error) {
|
||||||
|
file, err := os.Open(tarFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
var fileReader io.ReadCloser = file
|
||||||
|
if strings.HasSuffix(tarFile, ".gz") {
|
||||||
|
if fileReader, err = gzip.NewReader(file); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer fileReader.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
return readTarContents(filepath.Base(tarFile), fileReader)
|
||||||
|
}
|
||||||
|
|
||||||
|
func readTarContents(tarName string, data io.Reader) (*packageFile, error) {
|
||||||
|
tarReader := tar.NewReader(data)
|
||||||
|
|
||||||
|
p := &packageFile{Name: tarName, Contents: map[string]packageEntry{}}
|
||||||
|
for {
|
||||||
|
header, err := tarReader.Next()
|
||||||
|
if err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Contents[header.Name] = packageEntry{
|
||||||
|
File: header.Name,
|
||||||
|
UID: header.Uid,
|
||||||
|
GID: header.Gid,
|
||||||
|
Mode: os.FileMode(header.Mode),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func readZip(zipFile string) (*packageFile, error) {
|
||||||
|
r, err := zip.OpenReader(zipFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer r.Close()
|
||||||
|
|
||||||
|
p := &packageFile{Name: filepath.Base(zipFile), Contents: map[string]packageEntry{}}
|
||||||
|
for _, f := range r.File {
|
||||||
|
p.Contents[f.Name] = packageEntry{
|
||||||
|
File: f.Name,
|
||||||
|
Mode: f.Mode(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return p, nil
|
||||||
|
}
|
|
@ -3,35 +3,36 @@ SNAPSHOT?=yes
|
||||||
|
|
||||||
BEATS_BUILDER_IMAGE?=tudorg/beats-builder
|
BEATS_BUILDER_IMAGE?=tudorg/beats-builder
|
||||||
BEATS_BUILDER_DEB6_IMAGE?=tudorg/beats-builder-deb6
|
BEATS_BUILDER_DEB6_IMAGE?=tudorg/beats-builder-deb6
|
||||||
|
BEATS_GOPATH=$(firstword $(subst :, ,${GOPATH}))
|
||||||
|
|
||||||
makefile_abspath:=$(abspath $(lastword $(MAKEFILE_LIST)))
|
makefile_abspath:=$(abspath $(lastword $(MAKEFILE_LIST)))
|
||||||
packer_absdir=$(shell dirname ${makefile_abspath})
|
packer_absdir=$(shell dirname ${makefile_abspath})
|
||||||
beat_abspath=${GOPATH}/src/${BEAT_DIR}
|
beat_abspath=${BEATS_GOPATH}/src/${BEAT_PATH}
|
||||||
|
|
||||||
|
|
||||||
%/deb: ${BUILD_DIR}/god-linux-386 ${BUILD_DIR}/god-linux-amd64 fpm-image
|
%/deb: ${BUILD_DIR}/god-linux-386 ${BUILD_DIR}/god-linux-amd64 fpm-image
|
||||||
echo Creating DEB packages for $(@D)
|
echo Creating DEB packages for $(@D)
|
||||||
ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_DIR=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/debian/build.sh
|
ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/debian/build.sh
|
||||||
ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_DIR=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/debian/build.sh
|
ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/debian/build.sh
|
||||||
|
|
||||||
%/rpm: ${BUILD_DIR}/god-linux-386 ${BUILD_DIR}/god-linux-amd64 fpm-image
|
%/rpm: ${BUILD_DIR}/god-linux-386 ${BUILD_DIR}/god-linux-amd64 fpm-image
|
||||||
echo Creating RPM packages for $(@D)
|
echo Creating RPM packages for $(@D)
|
||||||
ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_DIR=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/centos/build.sh
|
ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/centos/build.sh
|
||||||
ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_DIR=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/centos/build.sh
|
ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/centos/build.sh
|
||||||
|
|
||||||
%/darwin:
|
%/darwin:
|
||||||
echo Creating Darwin packages for $(@D)
|
echo Creating Darwin packages for $(@D)
|
||||||
ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_DIR=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/darwin/build.sh
|
ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/darwin/build.sh
|
||||||
|
|
||||||
%/win:
|
%/win:
|
||||||
echo Creating Darwin packages for $(@D)
|
echo Creating Darwin packages for $(@D)
|
||||||
ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_DIR=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/windows/build.sh
|
ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/windows/build.sh
|
||||||
ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_DIR=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/windows/build.sh
|
ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/windows/build.sh
|
||||||
|
|
||||||
%/bin:
|
%/bin:
|
||||||
echo Creating Linux packages for $(@D)
|
echo Creating Linux packages for $(@D)
|
||||||
ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_DIR=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/binary/build.sh
|
ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/binary/build.sh
|
||||||
ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_DIR=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/binary/build.sh
|
ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/binary/build.sh
|
||||||
|
|
||||||
.PHONY: package-dashboards
|
.PHONY: package-dashboards
|
||||||
package-dashboards:
|
package-dashboards:
|
||||||
|
|
|
@ -14,5 +14,12 @@ RUN \
|
||||||
apt-get -o Acquire::Check-Valid-Until=false update && \
|
apt-get -o Acquire::Check-Valid-Until=false update && \
|
||||||
apt-get install -y libpcap0.8-dev
|
apt-get install -y libpcap0.8-dev
|
||||||
|
|
||||||
|
|
||||||
|
# Old git version which does not support proxy with go get requires to fetch go-yaml directly
|
||||||
|
RUN git clone https://github.com/go-yaml/yaml.git /go/src/gopkg.in/yaml.v2
|
||||||
|
|
||||||
|
# Load gotpl which is needed for creating the templates.
|
||||||
|
RUN go get github.com/tsg/gotpl
|
||||||
|
|
||||||
# add patch for gopacket
|
# add patch for gopacket
|
||||||
ADD gopacket_pcap.patch /gopacket_pcap.patch
|
ADD gopacket_pcap.patch /gopacket_pcap.patch
|
||||||
|
|
|
@ -1,15 +0,0 @@
|
||||||
# Go cross compiler (xgo): Go 1.7.1 layer
|
|
||||||
# Copyright (c) 2014 Péter Szilágyi. All rights reserved.
|
|
||||||
#
|
|
||||||
# Released under the MIT license.
|
|
||||||
|
|
||||||
FROM tudorg/xgo-deb6-base
|
|
||||||
|
|
||||||
MAINTAINER Tudor Golubenco <tudor@elastic.co>
|
|
||||||
|
|
||||||
# Configure the root Go distribution and bootstrap based on it
|
|
||||||
RUN \
|
|
||||||
export ROOT_DIST="https://storage.googleapis.com/golang/go1.7.1.linux-amd64.tar.gz" && \
|
|
||||||
export ROOT_DIST_SHA1="919ab01305ada0078a9fdf8a12bb56fb0b8a1444" && \
|
|
||||||
\
|
|
||||||
$BOOTSTRAP_PURE
|
|
5
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/build.sh
generated
vendored
5
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/build.sh
generated
vendored
|
@ -21,7 +21,6 @@
|
||||||
# Download the canonical import path (may fail, don't allow failures beyond)
|
# Download the canonical import path (may fail, don't allow failures beyond)
|
||||||
SRC_FOLDER=$SOURCE
|
SRC_FOLDER=$SOURCE
|
||||||
|
|
||||||
BEAT_PATH=$1
|
|
||||||
DST_FOLDER=`dirname $GOPATH/src/$BEAT_PATH`
|
DST_FOLDER=`dirname $GOPATH/src/$BEAT_PATH`
|
||||||
GIT_REPO=$BEAT_PATH
|
GIT_REPO=$BEAT_PATH
|
||||||
|
|
||||||
|
@ -110,8 +109,8 @@ if [ "$STATIC" == "true" ]; then LDARGS=--ldflags\ \'-extldflags\ \"-static\"\';
|
||||||
|
|
||||||
if [ -n $BEFORE_BUILD ]; then
|
if [ -n $BEFORE_BUILD ]; then
|
||||||
chmod +x /scripts/$BEFORE_BUILD
|
chmod +x /scripts/$BEFORE_BUILD
|
||||||
echo "Execute /scripts/$BEFORE_BUILD ${BEAT_PATH}"
|
echo "Execute /scripts/$BEFORE_BUILD ${BEAT_PATH} ${ES_BEATS}"
|
||||||
/scripts/$BEFORE_BUILD ${BEAT_PATH}
|
/scripts/$BEFORE_BUILD
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -24,6 +24,9 @@ RUN \
|
||||||
unzip `basename $WPDPACK_URL` -d /libpcap/win && \
|
unzip `basename $WPDPACK_URL` -d /libpcap/win && \
|
||||||
rm `basename $WPDPACK_URL`
|
rm `basename $WPDPACK_URL`
|
||||||
|
|
||||||
|
# Load gotpl
|
||||||
|
RUN go get github.com/tsg/gotpl
|
||||||
|
|
||||||
# Add patch for gopacket.
|
# Add patch for gopacket.
|
||||||
ADD gopacket_pcap.patch /gopacket_pcap.patch
|
ADD gopacket_pcap.patch /gopacket_pcap.patch
|
||||||
|
|
||||||
|
|
15
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/go-1.7.1/Dockerfile
generated
vendored
15
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/go-1.7.1/Dockerfile
generated
vendored
|
@ -1,15 +0,0 @@
|
||||||
# Go cross compiler (xgo): Go 1.7.1 layer
|
|
||||||
# Copyright (c) 2014 Péter Szilágyi. All rights reserved.
|
|
||||||
#
|
|
||||||
# Released under the MIT license.
|
|
||||||
|
|
||||||
FROM tudorg/xgo-base
|
|
||||||
|
|
||||||
MAINTAINER Tudor Golubenco <tudor@elastic.co>
|
|
||||||
|
|
||||||
# Configure the root Go distribution and bootstrap based on it
|
|
||||||
RUN \
|
|
||||||
export ROOT_DIST="https://storage.googleapis.com/golang/go1.7.1.linux-amd64.tar.gz" && \
|
|
||||||
export ROOT_DIST_SHA1="919ab01305ada0078a9fdf8a12bb56fb0b8a1444" && \
|
|
||||||
\
|
|
||||||
$BOOTSTRAP_PURE
|
|
|
@ -12,7 +12,7 @@ gotpl ${BASEDIR}/run.sh.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/run
|
||||||
chmod +x ${BUILD_DIR}/run-$runid.sh
|
chmod +x ${BUILD_DIR}/run-$runid.sh
|
||||||
|
|
||||||
docker run --rm -v ${BUILD_DIR}:/build \
|
docker run --rm -v ${BUILD_DIR}:/build \
|
||||||
-e BUILDID=$BUILDID -e SNAPSHOT=$SNAPSHOT -e RUNID=$runid -e BEATNAME=$BEATNAME \
|
-e BUILDID=$BUILDID -e SNAPSHOT=$SNAPSHOT -e RUNID=$runid -e BEAT_NAME=$BEAT_NAME \
|
||||||
tudorg/fpm /build/run-$runid.sh
|
tudorg/fpm /build/run-$runid.sh
|
||||||
|
|
||||||
rm ${BUILD_DIR}/settings-$runid.yml ${BUILD_DIR}/run-$runid.sh
|
rm ${BUILD_DIR}/settings-$runid.yml ${BUILD_DIR}/run-$runid.sh
|
||||||
|
|
14
vendor/github.com/elastic/beats/dev-tools/packer/platforms/dashboards/run.sh.j2
generated
vendored
14
vendor/github.com/elastic/beats/dev-tools/packer/platforms/dashboards/run.sh.j2
generated
vendored
|
@ -10,14 +10,14 @@ if [ "$SNAPSHOT" = "yes" ]; then
|
||||||
VERSION="${VERSION}-SNAPSHOT"
|
VERSION="${VERSION}-SNAPSHOT"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
mkdir /${BEATNAME:-beats}-dashboards-${VERSION}
|
mkdir /${BEAT_NAME:-beats}-dashboards-${VERSION}
|
||||||
cp -a dashboards/. /${BEATNAME:-beats}-dashboards-${VERSION}/
|
cp -a dashboards/. /${BEAT_NAME:-beats}-dashboards-${VERSION}/
|
||||||
echo "$BUILDID" > /${BEATNAME:-beats}-dashboards-${VERSION}/.build_hash.txt
|
echo "$BUILDID" > /${BEAT_NAME:-beats}-dashboards-${VERSION}/.build_hash.txt
|
||||||
|
|
||||||
mkdir -p upload
|
mkdir -p upload
|
||||||
zip -r upload/${BEATNAME:-beats}-dashboards-${VERSION}.zip /${BEATNAME:-beats}-dashboards-${VERSION}
|
zip -r upload/${BEAT_NAME:-beats}-dashboards-${VERSION}.zip /${BEAT_NAME:-beats}-dashboards-${VERSION}
|
||||||
echo "Created upload/${BEATNAME:-beats}-dashboards-${VERSION}.zip"
|
echo "Created upload/${BEAT_NAME:-beats}-dashboards-${VERSION}.zip"
|
||||||
|
|
||||||
cd upload
|
cd upload
|
||||||
sha1sum ${BEATNAME:-beats}-dashboards-${VERSION}.zip | awk '{print $1;}' > ${BEATNAME:-beats}-dashboards-${VERSION}.zip.sha1
|
sha1sum ${BEAT_NAME:-beats}-dashboards-${VERSION}.zip | awk '{print $1;}' > ${BEAT_NAME:-beats}-dashboards-${VERSION}.zip.sha1
|
||||||
echo "Created upload/${BEATNAME:-beats}-dashboards-${VERSION}.zip.sha1"
|
echo "Created upload/${BEAT_NAME:-beats}-dashboards-${VERSION}.zip.sha1"
|
||||||
|
|
|
@ -1 +1 @@
|
||||||
version: "1.0.0"
|
version: "5.3.0"
|
||||||
|
|
|
@ -2,75 +2,61 @@
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
BEAT_PATH=/go/src/${1}
|
if [ $BEAT_NAME = "packetbeat" ]; then
|
||||||
# BEATNAME is in the $PACK variable
|
|
||||||
BEATNAME=$PACK
|
|
||||||
|
|
||||||
if [ $BEATNAME = "packetbeat" ]; then
|
|
||||||
patch -p1 < /gopacket_pcap.patch
|
patch -p1 < /gopacket_pcap.patch
|
||||||
fi
|
fi
|
||||||
|
|
||||||
cd $BEAT_PATH
|
cd $GOPATH/src/$BEAT_PATH
|
||||||
|
|
||||||
|
# Files must be copied before before-build calls to allow modifications on the config files
|
||||||
|
|
||||||
PREFIX=/build
|
PREFIX=/build
|
||||||
|
|
||||||
|
# Copy template
|
||||||
|
cp $BEAT_NAME.template.json $PREFIX/$BEAT_NAME.template.json
|
||||||
|
cp $BEAT_NAME.template-es2x.json $PREFIX/$BEAT_NAME.template-es2x.json
|
||||||
|
|
||||||
|
# linux
|
||||||
|
cp $BEAT_NAME.yml $PREFIX/$BEAT_NAME-linux.yml
|
||||||
|
chmod 0600 $PREFIX/$BEAT_NAME-linux.yml
|
||||||
|
cp $BEAT_NAME.full.yml $PREFIX/$BEAT_NAME-linux.full.yml
|
||||||
|
|
||||||
|
# darwin
|
||||||
|
cp $BEAT_NAME.yml $PREFIX/$BEAT_NAME-darwin.yml
|
||||||
|
chmod 0600 $PREFIX/$BEAT_NAME-darwin.yml
|
||||||
|
cp $BEAT_NAME.full.yml $PREFIX/$BEAT_NAME-darwin.full.yml
|
||||||
|
|
||||||
|
# win
|
||||||
|
cp $BEAT_NAME.yml $PREFIX/$BEAT_NAME-win.yml
|
||||||
|
chmod 0600 $PREFIX/$BEAT_NAME-win.yml
|
||||||
|
cp $BEAT_NAME.full.yml $PREFIX/$BEAT_NAME-win.full.yml
|
||||||
|
|
||||||
|
# Runs beat specific tasks which should be done before building
|
||||||
|
PREFIX=$PREFIX make before-build
|
||||||
|
|
||||||
# Add data to the home directory
|
# Add data to the home directory
|
||||||
mkdir -p $PREFIX/homedir
|
mkdir -p $PREFIX/homedir
|
||||||
make install-home HOME_PREFIX=$PREFIX/homedir
|
make install-home HOME_PREFIX=$PREFIX/homedir
|
||||||
|
|
||||||
# Compile the import_dashboards binary for the requested targets.
|
# Build dashboards
|
||||||
if [ -d $BEAT_PATH/../libbeat/ ]; then
|
|
||||||
# official Beats have libbeat in the top level folder
|
|
||||||
LIBBEAT_PATH=$BEAT_PATH/../libbeat/
|
|
||||||
elif [ -d $BEAT_PATH/vendor/github.com/elastic/beats/libbeat/ ]; then
|
|
||||||
# community Beats have libbeat vendored
|
|
||||||
LIBBEAT_PATH=$BEAT_PATH/vendor/github.com/elastic/beats/libbeat/
|
|
||||||
else
|
|
||||||
echo "Couldn't find the libbeat location"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
for TARGET in $TARGETS; do
|
for TARGET in $TARGETS; do
|
||||||
echo "Compiling import_dashboards for $TARGET"
|
echo "Compiling import_dashboards for $TARGET"
|
||||||
XGOOS=`echo $TARGET | cut -d '/' -f 1`
|
XGOOS=`echo $TARGET | cut -d '/' -f 1`
|
||||||
XGOARCH=`echo $TARGET | cut -d '/' -f 2`
|
XGOARCH=`echo $TARGET | cut -d '/' -f 2`
|
||||||
|
|
||||||
GOOS=$XGOOS GOARCH=$XGOARCH go build -ldflags "-X main.beat=${BEATNAME}" -o $PREFIX/import_dashboards-$XGOOS-$XGOARCH $LIBBEAT_PATH/dashboards/import_dashboards.go
|
GOOS=$XGOOS GOARCH=$XGOARCH go build -ldflags "-X main.beat=${BEAT_NAME}" -o $PREFIX/import_dashboards-$XGOOS-$XGOARCH ${ES_BEATS}/libbeat/dashboards/import_dashboards.go
|
||||||
done
|
done
|
||||||
|
|
||||||
if [ -n "BUILDID" ]; then
|
if [ -n "BUILDID" ]; then
|
||||||
echo "$BUILDID" > $PREFIX/homedir/.build_hash.txt
|
echo "$BUILDID" > $PREFIX/homedir/.build_hash.txt
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Install gotpl. Clone and copy needed as go-yaml is behind a proxy which doesn't work
|
|
||||||
# with git 1.7
|
|
||||||
git clone https://github.com/tsg/gotpl.git /go/src/github.com/tsg/gotpl
|
|
||||||
mkdir -p /go/src/gopkg.in/yaml.v2
|
|
||||||
|
|
||||||
cp -r $LIBBEAT_PATH/../vendor/gopkg.in/yaml.v2 /go/src/gopkg.in/
|
|
||||||
go install github.com/tsg/gotpl
|
|
||||||
|
|
||||||
# Append doc versions to package.yml
|
# Append doc versions to package.yml
|
||||||
cat ${LIBBEAT_PATH}/docs/version.asciidoc >> ${PREFIX}/package.yml
|
cat ${ES_BEATS}/libbeat/docs/version.asciidoc >> ${PREFIX}/package.yml
|
||||||
|
|
||||||
# Make variable naming of doc-branch compatible with gotpl. Generate and copy README.md into homedir
|
# Make variable naming of doc-branch compatible with gotpl. Generate and copy README.md into homedir
|
||||||
sed -i -e 's/:doc-branch/doc_branch/g' ${PREFIX}/package.yml
|
sed -i -e 's/:doc-branch/doc_branch/g' ${PREFIX}/package.yml
|
||||||
|
|
||||||
|
# Create README file
|
||||||
/go/bin/gotpl /templates/readme.md.j2 < ${PREFIX}/package.yml > ${PREFIX}/homedir/README.md
|
/go/bin/gotpl /templates/readme.md.j2 < ${PREFIX}/package.yml > ${PREFIX}/homedir/README.md
|
||||||
|
|
||||||
# Copy template
|
|
||||||
cp $BEATNAME.template.json $PREFIX/$BEATNAME.template.json
|
|
||||||
cp $BEATNAME.template-es2x.json $PREFIX/$BEATNAME.template-es2x.json
|
|
||||||
|
|
||||||
# linux
|
|
||||||
cp $BEATNAME.yml $PREFIX/$BEATNAME-linux.yml
|
|
||||||
cp $BEATNAME.full.yml $PREFIX/$BEATNAME-linux.full.yml
|
|
||||||
|
|
||||||
# darwin
|
|
||||||
cp $BEATNAME.yml $PREFIX/$BEATNAME-darwin.yml
|
|
||||||
cp $BEATNAME.full.yml $PREFIX/$BEATNAME-darwin.full.yml
|
|
||||||
|
|
||||||
# win
|
|
||||||
cp $BEATNAME.yml $PREFIX/$BEATNAME-win.yml
|
|
||||||
cp $BEATNAME.full.yml $PREFIX/$BEATNAME-win.full.yml
|
|
||||||
|
|
||||||
# Contains beat specific adjustments. As it is platform specific knowledge, it should be in packer not the beats itself
|
|
||||||
PREFIX=$PREFIX make before-build
|
|
||||||
|
|
20
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/blakesmith/ar/COPYING
generated
vendored
Normal file
20
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/blakesmith/ar/COPYING
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
Copyright (c) 2013 Blake Smith <blakesmith0@gmail.com>
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
||||||
|
|
9
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/blakesmith/ar/README.md
generated
vendored
Normal file
9
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/blakesmith/ar/README.md
generated
vendored
Normal file
|
@ -0,0 +1,9 @@
|
||||||
|
# Golang ar (archive) file reader
|
||||||
|
|
||||||
|
This is a simple library for reading and writing [ar](http://en.wikipedia.org/wiki/Ar_(Unix)) files in common format. It is influenced heavily in style and interface from the golang [tar](http://golang.org/pkg/archive/tar/) package.
|
||||||
|
|
||||||
|
## Author
|
||||||
|
|
||||||
|
Written by Blake Smith <blakesmith0@gmail.com>
|
||||||
|
|
||||||
|
Licensed under the MIT license.
|
48
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/blakesmith/ar/common.go
generated
vendored
Normal file
48
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/blakesmith/ar/common.go
generated
vendored
Normal file
|
@ -0,0 +1,48 @@
|
||||||
|
/*
|
||||||
|
Copyright (c) 2013 Blake Smith <blakesmith0@gmail.com>
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
||||||
|
*/
|
||||||
|
package ar
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
HEADER_BYTE_SIZE = 60
|
||||||
|
GLOBAL_HEADER = "!<arch>\n"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Header struct {
|
||||||
|
Name string
|
||||||
|
ModTime time.Time
|
||||||
|
Uid int
|
||||||
|
Gid int
|
||||||
|
Mode int64
|
||||||
|
Size int64
|
||||||
|
}
|
||||||
|
|
||||||
|
type slicer []byte
|
||||||
|
|
||||||
|
func (sp *slicer) next(n int) (b []byte) {
|
||||||
|
s := *sp
|
||||||
|
b, *sp = s[0:n], s[n:]
|
||||||
|
return
|
||||||
|
}
|
155
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/blakesmith/ar/reader.go
generated
vendored
Normal file
155
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/blakesmith/ar/reader.go
generated
vendored
Normal file
|
@ -0,0 +1,155 @@
|
||||||
|
/*
|
||||||
|
Copyright (c) 2013 Blake Smith <blakesmith0@gmail.com>
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
||||||
|
*/
|
||||||
|
package ar
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Provides read access to an ar archive.
|
||||||
|
// Call next to skip files
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
// reader := NewReader(f)
|
||||||
|
// var buf bytes.Buffer
|
||||||
|
// for {
|
||||||
|
// _, err := reader.Next()
|
||||||
|
// if err == io.EOF {
|
||||||
|
// break
|
||||||
|
// }
|
||||||
|
// if err != nil {
|
||||||
|
// t.Errorf(err.Error())
|
||||||
|
// }
|
||||||
|
// io.Copy(&buf, reader)
|
||||||
|
// }
|
||||||
|
|
||||||
|
type Reader struct {
|
||||||
|
r io.Reader
|
||||||
|
nb int64
|
||||||
|
pad int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copies read data to r. Strips the global ar header.
|
||||||
|
func NewReader(r io.Reader) *Reader {
|
||||||
|
io.CopyN(ioutil.Discard, r, 8) // Discard global header
|
||||||
|
|
||||||
|
return &Reader{r: r}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rd *Reader) string(b []byte) string {
|
||||||
|
i := len(b)-1
|
||||||
|
for i > 0 && b[i] == 32 {
|
||||||
|
i--
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(b[0:i+1])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rd *Reader) numeric(b []byte) int64 {
|
||||||
|
i := len(b)-1
|
||||||
|
for i > 0 && b[i] == 32 {
|
||||||
|
i--
|
||||||
|
}
|
||||||
|
|
||||||
|
n, _ := strconv.ParseInt(string(b[0:i+1]), 10, 64)
|
||||||
|
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rd *Reader) octal(b []byte) int64 {
|
||||||
|
i := len(b)-1
|
||||||
|
for i > 0 && b[i] == 32 {
|
||||||
|
i--
|
||||||
|
}
|
||||||
|
|
||||||
|
n, _ := strconv.ParseInt(string(b[3:i+1]), 8, 64)
|
||||||
|
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rd *Reader) skipUnread() error {
|
||||||
|
skip := rd.nb + rd.pad
|
||||||
|
rd.nb, rd.pad = 0, 0
|
||||||
|
if seeker, ok := rd.r.(io.Seeker); ok {
|
||||||
|
_, err := seeker.Seek(skip, os.SEEK_CUR)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := io.CopyN(ioutil.Discard, rd.r, skip)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rd *Reader) readHeader() (*Header, error) {
|
||||||
|
headerBuf := make([]byte, HEADER_BYTE_SIZE)
|
||||||
|
if _, err := io.ReadFull(rd.r, headerBuf); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
header := new(Header)
|
||||||
|
s := slicer(headerBuf)
|
||||||
|
|
||||||
|
header.Name = rd.string(s.next(16))
|
||||||
|
header.ModTime = time.Unix(rd.numeric(s.next(12)), 0)
|
||||||
|
header.Uid = int(rd.numeric(s.next(6)))
|
||||||
|
header.Gid = int(rd.numeric(s.next(6)))
|
||||||
|
header.Mode = rd.octal(s.next(8))
|
||||||
|
header.Size = rd.numeric(s.next(10))
|
||||||
|
|
||||||
|
rd.nb = int64(header.Size)
|
||||||
|
if header.Size%2 == 1 {
|
||||||
|
rd.pad = 1
|
||||||
|
} else {
|
||||||
|
rd.pad = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
return header, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call Next() to skip to the next file in the archive file.
|
||||||
|
// Returns a Header which contains the metadata about the
|
||||||
|
// file in the archive.
|
||||||
|
func (rd *Reader) Next() (*Header, error) {
|
||||||
|
err := rd.skipUnread()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return rd.readHeader()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read data from the current entry in the archive.
|
||||||
|
func (rd *Reader) Read(b []byte) (n int, err error) {
|
||||||
|
if rd.nb == 0 {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
if int64(len(b)) > rd.nb {
|
||||||
|
b = b[0:rd.nb]
|
||||||
|
}
|
||||||
|
n, err = rd.r.Read(b)
|
||||||
|
rd.nb -= int64(n)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
124
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/blakesmith/ar/writer.go
generated
vendored
Normal file
124
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/blakesmith/ar/writer.go
generated
vendored
Normal file
|
@ -0,0 +1,124 @@
|
||||||
|
/*
|
||||||
|
Copyright (c) 2013 Blake Smith <blakesmith0@gmail.com>
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
||||||
|
*/
|
||||||
|
package ar
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrWriteTooLong = errors.New("ar: write too long")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Writer provides sequential writing of an ar archive.
|
||||||
|
// An ar archive is sequence of header file pairs
|
||||||
|
// Call WriteHeader to begin writing a new file, then call Write to supply the file's data
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
// archive := ar.NewWriter(writer)
|
||||||
|
// archive.WriteGlobalHeader()
|
||||||
|
// header := new(ar.Header)
|
||||||
|
// header.Size = 15 // bytes
|
||||||
|
// if err := archive.WriteHeader(header); err != nil {
|
||||||
|
// return err
|
||||||
|
// }
|
||||||
|
// io.Copy(archive, data)
|
||||||
|
type Writer struct {
|
||||||
|
w io.Writer
|
||||||
|
nb int64 // number of unwritten bytes for the current file entry
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a new ar writer that writes to w
|
||||||
|
func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
|
||||||
|
|
||||||
|
func (aw *Writer) numeric(b []byte, x int64) {
|
||||||
|
s := strconv.FormatInt(x, 10)
|
||||||
|
for len(s) < len(b) {
|
||||||
|
s = s + " "
|
||||||
|
}
|
||||||
|
copy(b, []byte(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aw *Writer) octal(b []byte, x int64) {
|
||||||
|
s := "100" + strconv.FormatInt(x, 8)
|
||||||
|
for len(s) < len(b) {
|
||||||
|
s = s + " "
|
||||||
|
}
|
||||||
|
copy(b, []byte(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aw *Writer) string(b []byte, str string) {
|
||||||
|
s := str
|
||||||
|
for len(s) < len(b) {
|
||||||
|
s = s + " "
|
||||||
|
}
|
||||||
|
copy(b, []byte(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Writes to the current entry in the ar archive
|
||||||
|
// Returns ErrWriteTooLong if more than header.Size
|
||||||
|
// bytes are written after a call to WriteHeader
|
||||||
|
func (aw *Writer) Write(b []byte) (n int, err error) {
|
||||||
|
if int64(len(b)) > aw.nb {
|
||||||
|
b = b[0:aw.nb]
|
||||||
|
err = ErrWriteTooLong
|
||||||
|
}
|
||||||
|
n, werr := aw.w.Write(b)
|
||||||
|
aw.nb -= int64(n)
|
||||||
|
if werr != nil {
|
||||||
|
return n, werr
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(b)%2 == 1 { // data size must be aligned to an even byte
|
||||||
|
n2, _ := aw.w.Write([]byte{'\n'})
|
||||||
|
return n+n2, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteGlobalHeader writes the archive's global file signature to the
// underlying writer. It must be called once, before any entry headers are
// written.
func (aw *Writer) WriteGlobalHeader() error {
	_, err := aw.w.Write([]byte(GLOBAL_HEADER))
	return err
}
|
||||||
|
|
||||||
|
// Writes the header to the underlying writer and prepares
|
||||||
|
// to receive the file payload
|
||||||
|
func (aw *Writer) WriteHeader(hdr *Header) error {
|
||||||
|
aw.nb = int64(hdr.Size)
|
||||||
|
header := make([]byte, HEADER_BYTE_SIZE)
|
||||||
|
s := slicer(header)
|
||||||
|
|
||||||
|
aw.string(s.next(16), hdr.Name)
|
||||||
|
aw.numeric(s.next(12), hdr.ModTime.Unix())
|
||||||
|
aw.numeric(s.next(6), int64(hdr.Uid))
|
||||||
|
aw.numeric(s.next(6), int64(hdr.Gid))
|
||||||
|
aw.octal(s.next(8), hdr.Mode)
|
||||||
|
aw.numeric(s.next(10), hdr.Size)
|
||||||
|
aw.string(s.next(2), "`\n")
|
||||||
|
|
||||||
|
_, err := aw.w.Write(header)
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
57
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/README.md
generated
vendored
Normal file
57
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/README.md
generated
vendored
Normal file
|
@ -0,0 +1,57 @@
|
||||||
|
# go-rpm [![Build Status](https://travis-ci.org/cavaliercoder/go-rpm.svg?branch=master)](https://travis-ci.org/cavaliercoder/go-rpm) [![GoDoc](https://godoc.org/github.com/cavaliercoder/go-rpm?status.svg)](https://godoc.org/github.com/cavaliercoder/go-rpm)
|
||||||
|
|
||||||
|
A native implementation of the RPM file specification in Go.
|
||||||
|
|
||||||
|
$ go get github.com/cavaliercoder/go-rpm
|
||||||
|
|
||||||
|
|
||||||
|
The go-rpm package aims to enable cross-platform tooling for yum/dnf/rpm
|
||||||
|
written in Go (E.g. [y10k](https://github.com/cavaliercoder/y10k)).
|
||||||
|
|
||||||
|
Initial goals include like-for-like implementation of existing rpm ecosystem
|
||||||
|
features such as:
|
||||||
|
|
||||||
|
* Reading of modern and legacy rpm package file formats
|
||||||
|
* Reading, creating and updating modern and legacy yum repository metadata
|
||||||
|
* Reading of the rpm database
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/cavaliercoder/go-rpm"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
p, err := rpm.OpenPackageFile("my-package.rpm")
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Loaded package: %v", p)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
Copyright (c) 2015 Ryan Armstrong
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
113
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/dependency.go
generated
vendored
Normal file
113
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/dependency.go
generated
vendored
Normal file
|
@ -0,0 +1,113 @@
|
||||||
|
package rpm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Dependency flags indicate how versions comparisons should be computed when
// comparing versions of dependent packages.
const (
	DepFlagAny            = 0        // no version constraint
	DepFlagLesser         = (1 << 1) // "<"
	DepFlagGreater        = (1 << 2) // ">"
	DepFlagEqual          = (1 << 3) // "="
	DepFlagLesserOrEqual  = (DepFlagEqual | DepFlagLesser)  // "<="
	DepFlagGreaterOrEqual = (DepFlagEqual | DepFlagGreater) // ">="
)
|
||||||
|
|
||||||
|
// See: https://github.com/rpm-software-management/rpm/blob/master/lib/rpmds.h#L25

// Dependency is an interface which represents a relationship between two
// packages. It might indicate that one package requires, conflicts with,
// obsoletes or provides another package.
//
// Dependency implements the PackageVersion interface and so may be used when
// comparing versions with other types of packages.
type Dependency interface {
	PackageVersion

	// Flags returns the version-comparison semantics of the relationship, as
	// a bitmask of the DepFlag* constants declared above.
	Flags() int
}
|
||||||
|
|
||||||
|
// dependency is a private basic implementation of a package dependency.
type dependency struct {
	flags   int    // bitmask of DepFlag* comparison constants
	name    string // name of the target package
	epoch   int    // epoch constraint of the target package
	version string // version constraint of the target package
	release string // release constraint of the target package
}
|
||||||
|
|
||||||
|
// Dependencies are a slice of Dependency interfaces.
type Dependencies []Dependency

// NewDependency returns a new instance of a package dependency definition.
// The flgs argument is a bitmask of the DepFlag* constants describing how the
// given version constraint should be compared.
func NewDependency(flgs int, name string, epoch int, version string, release string) Dependency {
	return &dependency{
		flags:   flgs,
		name:    name,
		epoch:   epoch,
		version: version,
		release: release,
	}
}
|
||||||
|
|
||||||
|
// String returns a string representation a package dependency in a similar
|
||||||
|
// format to `rpm -qR`.
|
||||||
|
func (c *dependency) String() string {
|
||||||
|
s := c.name
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case DepFlagLesserOrEqual == (c.flags & DepFlagLesserOrEqual):
|
||||||
|
s = fmt.Sprintf("%s <=", s)
|
||||||
|
|
||||||
|
case DepFlagLesser == (c.flags & DepFlagLesser):
|
||||||
|
s = fmt.Sprintf("%s <", s)
|
||||||
|
|
||||||
|
case DepFlagGreaterOrEqual == (c.flags & DepFlagGreaterOrEqual):
|
||||||
|
s = fmt.Sprintf("%s >=", s)
|
||||||
|
|
||||||
|
case DepFlagGreater == (c.flags & DepFlagGreater):
|
||||||
|
s = fmt.Sprintf("%s >", s)
|
||||||
|
|
||||||
|
case DepFlagEqual == (c.flags & DepFlagEqual):
|
||||||
|
s = fmt.Sprintf("%s =", s)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.version != "" {
|
||||||
|
s = fmt.Sprintf("%s %s", s, c.version)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.release != "" {
|
||||||
|
s = fmt.Sprintf("%s.%s", s, c.release)
|
||||||
|
}
|
||||||
|
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flags determines the nature of the package relationship and the comparison
// used for the given version constraint. The value is a bitmask of the
// DepFlag* constants.
func (c *dependency) Flags() int {
	return c.flags
}

// Name is the name of the target package.
func (c *dependency) Name() string {
	return c.name
}

// Epoch is the epoch constraint of the target package.
func (c *dependency) Epoch() int {
	return c.epoch
}

// Version is the version constraint of the target package.
func (c *dependency) Version() string {
	return c.version
}

// Release is the release constraint of the target package.
func (c *dependency) Release() string {
	return c.release
}
|
21
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/doc.go
generated
vendored
Normal file
21
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
/*
|
||||||
|
A native implementation of the RPM file specification in Go.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/cavaliercoder/go-rpm"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
p, err := rpm.OpenPackageFile("my-package.rpm")
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Loaded package: %v", p)
|
||||||
|
}
|
||||||
|
|
||||||
|
*/
|
||||||
|
package rpm
|
65
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/fileinfo.go
generated
vendored
Normal file
65
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/fileinfo.go
generated
vendored
Normal file
|
@ -0,0 +1,65 @@
|
||||||
|
package rpm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A FileInfo describes a file in a RPM package and is returned by
|
||||||
|
// packagefile.Files.
|
||||||
|
//
|
||||||
|
// FileInfo implements the os.FileInfo interface.
|
||||||
|
type FileInfo struct {
|
||||||
|
name string
|
||||||
|
size int64
|
||||||
|
mode os.FileMode
|
||||||
|
modTime time.Time
|
||||||
|
isDir bool
|
||||||
|
owner string
|
||||||
|
group string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *FileInfo) String() string {
|
||||||
|
return f.Name()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name is the full path of a file in a RPM package
|
||||||
|
func (f *FileInfo) Name() string {
|
||||||
|
return f.name
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size is the size in bytes of a file in a RPM package
|
||||||
|
func (f *FileInfo) Size() int64 {
|
||||||
|
return f.size
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mode is the file mode in bits of a file in a RPM package
|
||||||
|
func (f *FileInfo) Mode() os.FileMode {
|
||||||
|
return f.mode
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModTime is the modification time of a file in a RPM package
|
||||||
|
func (f *FileInfo) ModTime() time.Time {
|
||||||
|
return f.modTime
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsDir returns true if a file is a directory in a RPM package
|
||||||
|
func (f *FileInfo) IsDir() bool {
|
||||||
|
return f.isDir
|
||||||
|
}
|
||||||
|
|
||||||
|
// Owner is the name of the owner of a file in a RPM package
|
||||||
|
func (f *FileInfo) Owner() string {
|
||||||
|
return f.owner
|
||||||
|
}
|
||||||
|
|
||||||
|
// Group is the name of the owner group of a file in a RPM package
|
||||||
|
func (f *FileInfo) Group() string {
|
||||||
|
return f.group
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sys is required to implement os.FileInfo and always returns nil
|
||||||
|
func (f *FileInfo) Sys() interface{} {
|
||||||
|
// underlying data source is a bunch of rpm header indices
|
||||||
|
return nil
|
||||||
|
}
|
42
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/fuzz.go
generated
vendored
Normal file
42
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/fuzz.go
generated
vendored
Normal file
|
@ -0,0 +1,42 @@
|
||||||
|
// +build gofuzz
|
||||||
|
|
||||||
|
package rpm
|
||||||
|
|
||||||
|
import "bytes"
|
||||||
|
|
||||||
|
// Fuzz tests the parsing and error handling of random byte arrays using
|
||||||
|
// https://github.com/dvyukov/go-fuzz.
|
||||||
|
func Fuzz(data []byte) int {
|
||||||
|
if p, err := ReadPackageFile(bytes.NewReader(data)); err != nil {
|
||||||
|
// handled errors are not very interesting
|
||||||
|
return 0
|
||||||
|
} else {
|
||||||
|
// call some tag handlers
|
||||||
|
_ = p.String()
|
||||||
|
_ = p.Requires()
|
||||||
|
_ = p.Conflicts()
|
||||||
|
_ = p.Obsoletes()
|
||||||
|
_ = p.Provides()
|
||||||
|
|
||||||
|
// read all index values
|
||||||
|
for _, h := range p.Headers {
|
||||||
|
for _, x := range h.Indexes {
|
||||||
|
switch x.Type {
|
||||||
|
case IndexDataTypeBinary:
|
||||||
|
_ = h.Indexes.BytesByTag(x.Tag)
|
||||||
|
|
||||||
|
case IndexDataTypeChar, IndexDataTypeInt8, IndexDataTypeInt16, IndexDataTypeInt32, IndexDataTypeInt64:
|
||||||
|
_ = h.Indexes.IntsByTag(x.Tag)
|
||||||
|
_ = h.Indexes.IntByTag(x.Tag)
|
||||||
|
|
||||||
|
case IndexDataTypeString, IndexDataTypeI8NString, IndexDataTypeStringArray:
|
||||||
|
_ = h.Indexes.StringsByTag(x.Tag)
|
||||||
|
_ = h.Indexes.StringByTag(x.Tag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// everything worked with random input... interesting :|
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
}
|
136
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/gpgcheck.go
generated
vendored
Normal file
136
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/gpgcheck.go
generated
vendored
Normal file
|
@ -0,0 +1,136 @@
|
||||||
|
package rpm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/md5"
|
||||||
|
"fmt"
|
||||||
|
"golang.org/x/crypto/openpgp"
|
||||||
|
"golang.org/x/crypto/openpgp/errors"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Signature storage types (as defined in package lead segment)
const (
	// RPMSIGTYPE_HEADERSIG indicates that the signature is stored in the first
	// package header; it is the only type accepted by rpmReadSigHeader.
	RPMSIGTYPE_HEADERSIG = 5
)
|
||||||
|
|
||||||
|
// Predefined checksum errors, returned as sentinel values by the validation
// functions below.
var (
	// ErrMD5ValidationFailed indicates that a RPM package failed checksum
	// validation. Returned by MD5Check.
	ErrMD5ValidationFailed = fmt.Errorf("MD5 checksum validation failed")

	// ErrGPGValidationFailed indicates that a RPM package failed GPG signature
	// validation. Returned by GPGCheck.
	ErrGPGValidationFailed = fmt.Errorf("GPG signature validation failed")
)
|
||||||
|
|
||||||
|
// rpmReadSigHeader reads the lead and signature header of a rpm package and
|
||||||
|
// returns a pointer to the signature header.
|
||||||
|
func rpmReadSigHeader(r io.Reader) (*Header, error) {
|
||||||
|
// read package lead
|
||||||
|
if lead, err := ReadPackageLead(r); err != nil {
|
||||||
|
return nil, err
|
||||||
|
} else {
|
||||||
|
// check signature type
|
||||||
|
if lead.SignatureType != RPMSIGTYPE_HEADERSIG {
|
||||||
|
return nil, fmt.Errorf("Unsupported signature type: 0x%x", lead.SignatureType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// read signature header
|
||||||
|
sigheader, err := ReadPackageHeader(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return sigheader, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GPGCheck validates the integrity of a RPM package file read from the given
|
||||||
|
// io.Reader. Public keys in the given keyring are used to validate the package
|
||||||
|
// signature.
|
||||||
|
//
|
||||||
|
// If validation succeeds, nil is returned. If validation fails,
|
||||||
|
// ErrGPGValidationFailed is returned.
|
||||||
|
//
|
||||||
|
// This function is an expensive operation which reads the entire package file.
|
||||||
|
func GPGCheck(r io.Reader, keyring openpgp.KeyRing) (string, error) {
|
||||||
|
// read signature header
|
||||||
|
sigheader, err := rpmReadSigHeader(r)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
// get signature bytes
|
||||||
|
var sigval []byte = nil
|
||||||
|
for _, tag := range []int{RPMSIGTAG_PGP, RPMSIGTAG_PGP5, RPMSIGTAG_GPG} {
|
||||||
|
if sigval = sigheader.Indexes.BytesByTag(tag); sigval != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if sigval == nil {
|
||||||
|
return "", fmt.Errorf("Package signature not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
// check signature
|
||||||
|
signer, err := openpgp.CheckDetachedSignature(keyring, r, bytes.NewReader(sigval))
|
||||||
|
if err == errors.ErrUnknownIssuer {
|
||||||
|
return "", ErrGPGValidationFailed
|
||||||
|
} else if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
// get signer identity
|
||||||
|
for id, _ := range signer.Identities {
|
||||||
|
return id, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", fmt.Errorf("No identity found in public key")
|
||||||
|
}
|
||||||
|
|
||||||
|
// MD5Check validates the integrity of a RPM package file read from the given
|
||||||
|
// io.Reader. An MD5 checksum is computed for the package payload and compared
|
||||||
|
// with the checksum value specified in the package header.
|
||||||
|
//
|
||||||
|
// If validation succeeds, nil is returned. If validation fails,
|
||||||
|
// ErrMD5ValidationFailed is returned.
|
||||||
|
//
|
||||||
|
// This function is an expensive operation which reads the entire package file.
|
||||||
|
func MD5Check(r io.Reader) error {
|
||||||
|
// read signature header
|
||||||
|
sigheader, err := rpmReadSigHeader(r)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// get expected payload size
|
||||||
|
payloadSize := sigheader.Indexes.IntByTag(RPMSIGTAG_SIZE)
|
||||||
|
if payloadSize == 0 {
|
||||||
|
return fmt.Errorf("RPMSIGTAG_SIZE tag not found in signature header")
|
||||||
|
}
|
||||||
|
|
||||||
|
// get expected payload md5 sum
|
||||||
|
sigmd5 := sigheader.Indexes.BytesByTag(RPMSIGTAG_MD5)
|
||||||
|
if sigmd5 == nil {
|
||||||
|
return fmt.Errorf("RPMSIGTAG_MD5 tag not found in signature header")
|
||||||
|
}
|
||||||
|
|
||||||
|
// compute payload sum
|
||||||
|
h := md5.New()
|
||||||
|
if n, err := io.Copy(h, r); err != nil {
|
||||||
|
return fmt.Errorf("Error reading payload: %v", err)
|
||||||
|
} else if n != payloadSize {
|
||||||
|
return ErrMD5ValidationFailed
|
||||||
|
}
|
||||||
|
|
||||||
|
// compare sums
|
||||||
|
payloadmd5 := h.Sum(nil)
|
||||||
|
if !bytes.Equal(payloadmd5, sigmd5) {
|
||||||
|
return ErrMD5ValidationFailed
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
291
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/header.go
generated
vendored
Normal file
291
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/header.go
generated
vendored
Normal file
|
@ -0,0 +1,291 @@
|
||||||
|
package rpm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Header stores metadata about a rpm package, decoded from one on-disk
// header section.
type Header struct {
	Version    int          // header structure version (byte 3 of the descriptor)
	IndexCount int          // number of index entries in this header
	Length     int          // length in bytes of the header's value store
	Indexes    IndexEntries // decoded index entries and their values
	Start      int          // NOTE(review): not set by ReadPackageHeader in this file; presumably the header's start offset, recorded by callers — confirm
	End        int          // NOTE(review): as above, presumably the header's end offset — confirm
}

// Headers is an array of Header structs.
type Headers []Header
|
||||||
|
|
||||||
|
// Predefined sizing constraints.
const (
	// MAX_HEADER_SIZE is the maximum allowable header size in bytes (32 MB).
	// ReadPackageHeader rejects headers whose declared length exceeds this,
	// bounding buffer allocations for corrupted input.
	MAX_HEADER_SIZE = 33554432
)
|
||||||
|
|
||||||
|
// Predefined header errors. These are returned as sentinel values by
// ReadPackageHeader and may be compared directly by callers.
var (
	// ErrBadHeaderLength indicates that the read header section is not the
	// expected length.
	ErrBadHeaderLength = fmt.Errorf("RPM header section is incorrect length")

	// ErrNotHeader indicates that the read header section does not start with
	// the expected descriptor (magic bytes 0x8E 0xAD 0xE8).
	ErrNotHeader = fmt.Errorf("invalid RPM header descriptor")

	// ErrBadStoreLength indicates that the read header store section is not the
	// expected length.
	ErrBadStoreLength = fmt.Errorf("header value store is incorrect length")
)

// Predefined header index errors, also returned by ReadPackageHeader when the
// index table or an individual index value is malformed.
var (
	// ErrBadIndexCount indicates that number of indexes given in the read
	// header would exceed the actual size of the header.
	ErrBadIndexCount = fmt.Errorf("index count exceeds header size")

	// ErrBadIndexLength indicates that the read header index section is not the
	// expected length.
	ErrBadIndexLength = fmt.Errorf("index section is incorrect length")

	// ErrIndexOutOfRange indicates that the read header index would exceed the
	// range of the header.
	ErrIndexOutOfRange = fmt.Errorf("index is out of range")

	// ErrBadIndexType indicates that the read index contains a value of an
	// unsupported data type.
	ErrBadIndexType = fmt.Errorf("unknown index data type")

	// ErrBadIndexValueCount indicates that the read index value would exceed
	// the range of the header store section.
	ErrBadIndexValueCount = fmt.Errorf("index value count is out of range")
)
|
||||||
|
|
||||||
|
// ReadPackageHeader reads an RPM package file header structure from the given
|
||||||
|
// io.Reader.
|
||||||
|
//
|
||||||
|
// This function should only be used if you intend to read a package header
|
||||||
|
// structure in isolation.
|
||||||
|
func ReadPackageHeader(r io.Reader) (*Header, error) {
|
||||||
|
// read the "header structure header"
|
||||||
|
header := make([]byte, 16)
|
||||||
|
n, err := r.Read(header)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if n != 16 {
|
||||||
|
return nil, ErrBadHeaderLength
|
||||||
|
}
|
||||||
|
|
||||||
|
// check magic number
|
||||||
|
if 0 != bytes.Compare(header[:3], []byte{0x8E, 0xAD, 0xE8}) {
|
||||||
|
return nil, ErrNotHeader
|
||||||
|
}
|
||||||
|
|
||||||
|
// translate header
|
||||||
|
h := &Header{
|
||||||
|
Version: int(header[3]),
|
||||||
|
IndexCount: int(binary.BigEndian.Uint32(header[8:12])),
|
||||||
|
Length: int(binary.BigEndian.Uint32(header[12:16])),
|
||||||
|
}
|
||||||
|
|
||||||
|
// make sure header size is in range
|
||||||
|
if h.Length > MAX_HEADER_SIZE {
|
||||||
|
return nil, ErrBadHeaderLength
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure index count is in range
|
||||||
|
// This test is not entirely precise as h.Length also includes the value
|
||||||
|
// store. It should at least help eliminate excessive buffer allocations for
|
||||||
|
// corrupted length values in the > h.Length ranges.
|
||||||
|
if h.IndexCount*16 > h.Length {
|
||||||
|
return nil, ErrBadIndexCount
|
||||||
|
}
|
||||||
|
|
||||||
|
h.Indexes = make(IndexEntries, h.IndexCount)
|
||||||
|
|
||||||
|
// read indexes
|
||||||
|
indexLength := 16 * h.IndexCount
|
||||||
|
indexes := make([]byte, indexLength)
|
||||||
|
n, err = r.Read(indexes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if n != indexLength {
|
||||||
|
return nil, ErrBadIndexLength
|
||||||
|
}
|
||||||
|
|
||||||
|
for x := 0; x < h.IndexCount; x++ {
|
||||||
|
o := 16 * x
|
||||||
|
index := IndexEntry{
|
||||||
|
Tag: int(binary.BigEndian.Uint32(indexes[o : o+4])),
|
||||||
|
Type: int(binary.BigEndian.Uint32(indexes[o+4 : o+8])),
|
||||||
|
Offset: int(binary.BigEndian.Uint32(indexes[o+8 : o+12])),
|
||||||
|
ItemCount: int(binary.BigEndian.Uint32(indexes[o+12 : o+16])),
|
||||||
|
}
|
||||||
|
|
||||||
|
// validate index offset
|
||||||
|
if index.Offset >= h.Length {
|
||||||
|
return nil, ErrIndexOutOfRange
|
||||||
|
}
|
||||||
|
|
||||||
|
// append
|
||||||
|
h.Indexes[x] = index
|
||||||
|
}
|
||||||
|
|
||||||
|
// read the "store"
|
||||||
|
store := make([]byte, h.Length)
|
||||||
|
n, err = r.Read(store)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if n != h.Length {
|
||||||
|
return nil, ErrBadStoreLength
|
||||||
|
}
|
||||||
|
|
||||||
|
// parse the value of each index from the store
|
||||||
|
for x := 0; x < h.IndexCount; x++ {
|
||||||
|
index := h.Indexes[x]
|
||||||
|
o := index.Offset
|
||||||
|
|
||||||
|
if index.ItemCount == 0 {
|
||||||
|
return nil, ErrBadIndexValueCount
|
||||||
|
}
|
||||||
|
|
||||||
|
switch index.Type {
|
||||||
|
case IndexDataTypeChar:
|
||||||
|
vals := make([]uint8, index.ItemCount)
|
||||||
|
for v := 0; v < index.ItemCount; v++ {
|
||||||
|
if o >= len(store) {
|
||||||
|
return nil, fmt.Errorf("uint8 value for index %d is out of range", x+1)
|
||||||
|
}
|
||||||
|
|
||||||
|
vals[v] = uint8(store[o])
|
||||||
|
o += 1
|
||||||
|
}
|
||||||
|
|
||||||
|
index.Value = vals
|
||||||
|
|
||||||
|
case IndexDataTypeInt8:
|
||||||
|
vals := make([]int8, index.ItemCount)
|
||||||
|
for v := 0; v < index.ItemCount; v++ {
|
||||||
|
if o >= len(store) {
|
||||||
|
return nil, fmt.Errorf("int8 value for index %d is out of range", x+1)
|
||||||
|
}
|
||||||
|
|
||||||
|
vals[v] = int8(store[o])
|
||||||
|
o += 1
|
||||||
|
}
|
||||||
|
|
||||||
|
index.Value = vals
|
||||||
|
|
||||||
|
case IndexDataTypeInt16:
|
||||||
|
vals := make([]int16, index.ItemCount)
|
||||||
|
for v := 0; v < index.ItemCount; v++ {
|
||||||
|
if o+2 > len(store) {
|
||||||
|
return nil, fmt.Errorf("int16 value for index %d is out of range", x+1)
|
||||||
|
}
|
||||||
|
|
||||||
|
vals[v] = int16(binary.BigEndian.Uint16(store[o : o+2]))
|
||||||
|
o += 2
|
||||||
|
}
|
||||||
|
|
||||||
|
index.Value = vals
|
||||||
|
|
||||||
|
case IndexDataTypeInt32:
|
||||||
|
vals := make([]int32, index.ItemCount)
|
||||||
|
for v := 0; v < index.ItemCount; v++ {
|
||||||
|
if o+4 > len(store) {
|
||||||
|
return nil, fmt.Errorf("int32 value for index %d is out of range", x+1)
|
||||||
|
}
|
||||||
|
|
||||||
|
vals[v] = int32(binary.BigEndian.Uint32(store[o : o+4]))
|
||||||
|
o += 4
|
||||||
|
}
|
||||||
|
|
||||||
|
index.Value = vals
|
||||||
|
|
||||||
|
case IndexDataTypeInt64:
|
||||||
|
vals := make([]int64, index.ItemCount)
|
||||||
|
for v := 0; v < index.ItemCount; v++ {
|
||||||
|
if o+8 > len(store) {
|
||||||
|
return nil, fmt.Errorf("int64 value for index %d is out of range", x+1)
|
||||||
|
}
|
||||||
|
|
||||||
|
vals[v] = int64(binary.BigEndian.Uint64(store[o : o+8]))
|
||||||
|
o += 8
|
||||||
|
}
|
||||||
|
|
||||||
|
index.Value = vals
|
||||||
|
|
||||||
|
case IndexDataTypeBinary:
|
||||||
|
if o+index.ItemCount > len(store) {
|
||||||
|
return nil, fmt.Errorf("[]byte value for index %d is out of range", x+1)
|
||||||
|
}
|
||||||
|
|
||||||
|
b := make([]byte, index.ItemCount)
|
||||||
|
copy(b, store[o:o+index.ItemCount])
|
||||||
|
|
||||||
|
index.Value = b
|
||||||
|
|
||||||
|
case IndexDataTypeString, IndexDataTypeStringArray, IndexDataTypeI8NString:
|
||||||
|
// allow atleast one byte per string
|
||||||
|
if o+index.ItemCount > len(store) {
|
||||||
|
return nil, fmt.Errorf("[]string value for index %d is out of range", x+1)
|
||||||
|
}
|
||||||
|
|
||||||
|
vals := make([]string, index.ItemCount)
|
||||||
|
|
||||||
|
for s := 0; s < index.ItemCount; s++ {
|
||||||
|
// calculate string length
|
||||||
|
var j int
|
||||||
|
for j = 0; (o+j) < len(store) && store[o+j] != 0; j++ {
|
||||||
|
}
|
||||||
|
|
||||||
|
if j == len(store) {
|
||||||
|
return nil, fmt.Errorf("string value for index %d is out of range", x+1)
|
||||||
|
}
|
||||||
|
|
||||||
|
vals[s] = string(store[o : o+j])
|
||||||
|
o += j + 1
|
||||||
|
}
|
||||||
|
|
||||||
|
index.Value = vals
|
||||||
|
|
||||||
|
case IndexDataTypeNull:
|
||||||
|
// nothing to do here
|
||||||
|
|
||||||
|
default:
|
||||||
|
// unknown data type
|
||||||
|
return nil, ErrBadIndexType
|
||||||
|
}
|
||||||
|
|
||||||
|
// save in array
|
||||||
|
h.Indexes[x] = index
|
||||||
|
}
|
||||||
|
|
||||||
|
// calculate location of the end of the header by padding to a multiple of 8
|
||||||
|
o := 8 - int(math.Mod(float64(h.Length), 8))
|
||||||
|
|
||||||
|
// seek to the end of the header
|
||||||
|
if o > 0 && o < 8 {
|
||||||
|
pad := make([]byte, o)
|
||||||
|
n, err = r.Read(pad)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Error seeking beyond header padding of %d bytes: %v", o, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if n != o {
|
||||||
|
return nil, fmt.Errorf("Error seeking beyond header padding of %d bytes: only %d bytes returned", o, n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return h, nil
|
||||||
|
}
|
159
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/index.go
generated
vendored
Normal file
159
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/index.go
generated
vendored
Normal file
|
@ -0,0 +1,159 @@
|
||||||
|
package rpm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Header index value data types.
//
// The values follow iota (0-9) and correspond to the on-disk type codes
// stored in each header index entry's Type field.
const (
	IndexDataTypeNull int = iota
	IndexDataTypeChar
	IndexDataTypeInt8
	IndexDataTypeInt16
	IndexDataTypeInt32
	IndexDataTypeInt64
	IndexDataTypeString
	IndexDataTypeBinary
	IndexDataTypeStringArray
	IndexDataTypeI8NString
)
|
||||||
|
|
||||||
|
// An IndexEntry is a rpm key/value tag stored in the package header.
type IndexEntry struct {
	Tag       int         // numeric tag ID identifying the field
	Type      int         // one of the IndexDataType* constants
	Offset    int         // offset of the value within the header store
	ItemCount int         // number of elements decoded into Value
	Value     interface{} // decoded value; concrete type depends on Type (e.g. []int32, []string, []byte)
}

// IndexEntries is an array of IndexEntry structs.
type IndexEntries []IndexEntry
|
||||||
|
|
||||||
|
// IndexByTag returns a pointer to an IndexEntry with the given tag ID or nil if
|
||||||
|
// the tag is not found.
|
||||||
|
func (c IndexEntries) IndexByTag(tag int) *IndexEntry {
|
||||||
|
for _, e := range c {
|
||||||
|
if e.Tag == tag {
|
||||||
|
return &e
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringByTag returns the string value of an IndexEntry or an empty string if
|
||||||
|
// the tag is not found or has no value.
|
||||||
|
func (c IndexEntries) StringByTag(tag int) string {
|
||||||
|
i := c.IndexByTag(tag)
|
||||||
|
if i == nil || i.Value == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
s := i.Value.([]string)
|
||||||
|
|
||||||
|
return s[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringsByTag returns the slice of string values of an IndexEntry or nil if
|
||||||
|
// the tag is not found or has no value.
|
||||||
|
func (c IndexEntries) StringsByTag(tag int) []string {
|
||||||
|
i := c.IndexByTag(tag)
|
||||||
|
if i == nil || i.Value == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return i.Value.([]string)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IntsByTag returns the int64 values of an IndexEntry or nil if the tag is not
|
||||||
|
// found or has no value. Values with a lower range (E.g. int8) are cast as an
|
||||||
|
// int64.
|
||||||
|
func (c IndexEntries) IntsByTag(tag int) []int64 {
|
||||||
|
ix := c.IndexByTag(tag)
|
||||||
|
if ix != nil && ix.Value != nil {
|
||||||
|
vals := make([]int64, ix.ItemCount)
|
||||||
|
|
||||||
|
for i := 0; i < int(ix.ItemCount); i++ {
|
||||||
|
switch ix.Type {
|
||||||
|
case IndexDataTypeChar, IndexDataTypeInt8:
|
||||||
|
vals[i] = int64(ix.Value.([]int8)[i])
|
||||||
|
|
||||||
|
case IndexDataTypeInt16:
|
||||||
|
vals[i] = int64(ix.Value.([]int16)[i])
|
||||||
|
|
||||||
|
case IndexDataTypeInt32:
|
||||||
|
vals[i] = int64(ix.Value.([]int32)[i])
|
||||||
|
|
||||||
|
case IndexDataTypeInt64:
|
||||||
|
vals[i] = ix.Value.([]int64)[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return vals
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IntByTag returns the int64 value of an IndexEntry or 0 if the tag is not found
|
||||||
|
// or has no value. Values with a lower range (E.g. int8) are cast as an int64.
|
||||||
|
func (c IndexEntries) IntByTag(tag int) int64 {
|
||||||
|
i := c.IndexByTag(tag)
|
||||||
|
if i != nil && i.Value != nil {
|
||||||
|
switch i.Type {
|
||||||
|
case IndexDataTypeChar, IndexDataTypeInt8:
|
||||||
|
return int64(i.Value.([]int8)[0])
|
||||||
|
|
||||||
|
case IndexDataTypeInt16:
|
||||||
|
return int64(i.Value.([]int16)[0])
|
||||||
|
|
||||||
|
case IndexDataTypeInt32:
|
||||||
|
return int64(i.Value.([]int32)[0])
|
||||||
|
|
||||||
|
case IndexDataTypeInt64:
|
||||||
|
return int64(i.Value.([]int64)[0])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// BytesByTag returns the raw value of an IndexEntry or nil if the tag is not
|
||||||
|
// found or has no value.
|
||||||
|
func (c IndexEntries) BytesByTag(tag int) []byte {
|
||||||
|
i := c.IndexByTag(tag)
|
||||||
|
if i == nil || i.Value == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return i.Value.([]byte)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimesByTag returns the value of an IndexEntry as a slice of Go native
|
||||||
|
// timestamps or nil if the tag is not found or has no value.
|
||||||
|
func (c IndexEntries) TimesByTag(tag int) []time.Time {
|
||||||
|
ix := c.IndexByTag(tag)
|
||||||
|
|
||||||
|
if ix == nil || ix.Value == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
vals := make([]time.Time, ix.ItemCount)
|
||||||
|
for i := 0; i < ix.ItemCount; i++ {
|
||||||
|
vals[i] = time.Unix(int64(ix.Value.([]int32)[i]), 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
return vals
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeByTag returns the value of an IndexEntry as a Go native timestamp or
|
||||||
|
// zero-time if the tag is not found or has no value.
|
||||||
|
func (c IndexEntries) TimeByTag(tag int) time.Time {
|
||||||
|
vals := c.TimesByTag(tag)
|
||||||
|
if vals == nil || len(vals) == 0 {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
return vals[0]
|
||||||
|
}
|
55
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/keyring.go
generated
vendored
Normal file
55
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/keyring.go
generated
vendored
Normal file
|
@ -0,0 +1,55 @@
|
||||||
|
package rpm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/crypto/openpgp"
|
||||||
|
"golang.org/x/crypto/openpgp/armor"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// KeyRing reads a openpgp.KeyRing from the given io.Reader which may then be
|
||||||
|
// used to validate GPG keys in RPM packages.
|
||||||
|
func KeyRing(r io.Reader) (openpgp.KeyRing, error) {
|
||||||
|
// decode gpgkey file
|
||||||
|
p, err := armor.Decode(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// extract keys
|
||||||
|
return openpgp.ReadKeyRing(p.Body)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyRingFromFile reads a openpgp.KeyRing from the given file path which may
|
||||||
|
// then be used to validate GPG keys in RPM packages.
|
||||||
|
func KeyRingFromFile(path string) (openpgp.KeyRing, error) {
|
||||||
|
// open gpgkey file
|
||||||
|
f, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
// read keyring
|
||||||
|
return KeyRing(f)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyRingFromFiles reads a openpgp.KeyRing from the given file paths which may
|
||||||
|
// then be used to validate GPG keys in RPM packages.
|
||||||
|
//
|
||||||
|
// This function might typically be used to read all keys in /etc/pki/rpm-gpg.
|
||||||
|
func KeyRingFromFiles(files []string) (openpgp.KeyRing, error) {
|
||||||
|
keyring := make(openpgp.EntityList, 0)
|
||||||
|
for _, path := range files {
|
||||||
|
// read keyring in file
|
||||||
|
el, err := KeyRingFromFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// append keyring
|
||||||
|
keyring = append(keyring, el.(openpgp.EntityList)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return keyring, nil
|
||||||
|
}
|
79
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/lead.go
generated
vendored
Normal file
79
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/lead.go
generated
vendored
Normal file
|
@ -0,0 +1,79 @@
|
||||||
|
package rpm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Lead is the deprecated lead section of an RPM file which is used in legacy
// RPM versions to store package metadata.
type Lead struct {
	VersionMajor    int    // RPM format major version (ReadPackageLead accepts 3 or 4)
	VersionMinor    int    // RPM format minor version
	Name            string // name field; fixed 66-byte region of the lead, NUL-padded
	Type            int    // package type — presumably binary vs source; TODO confirm value semantics
	Architecture    int    // numeric architecture ID
	OperatingSystem int    // numeric operating system ID
	SignatureType   int    // signature header type
}
|
||||||
|
|
||||||
|
// Predefined lead section errors.
//
// These are sentinel values; callers may compare returned errors against
// them directly.
var (
	// ErrBadLeadLength indicates that the read lead section is not the expected
	// length.
	ErrBadLeadLength = fmt.Errorf("RPM lead section is incorrect length")

	// ErrNotRPMFile indicates that the read file does not start with the
	// expected descriptor.
	ErrNotRPMFile = fmt.Errorf("RPM file descriptor is invalid")

	// ErrUnsupportedVersion indicates that the read lead section version is not
	// currently supported.
	ErrUnsupportedVersion = fmt.Errorf("unsupported RPM package version")
)
|
||||||
|
|
||||||
|
// ReadPackageLead reads the deprecated lead section of an RPM file which is
|
||||||
|
// used in legacy RPM versions to store package metadata.
|
||||||
|
//
|
||||||
|
// This function should only be used if you intend to read a package lead in
|
||||||
|
// isolation.
|
||||||
|
func ReadPackageLead(r io.Reader) (*Lead, error) {
|
||||||
|
// read bytes
|
||||||
|
b := make([]byte, 96)
|
||||||
|
n, err := r.Read(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// check length
|
||||||
|
if n != 96 {
|
||||||
|
return nil, ErrBadLeadLength
|
||||||
|
}
|
||||||
|
|
||||||
|
// check magic number
|
||||||
|
if 0 != bytes.Compare(b[:4], []byte{0xED, 0xAB, 0xEE, 0xDB}) {
|
||||||
|
return nil, ErrNotRPMFile
|
||||||
|
}
|
||||||
|
|
||||||
|
// decode lead
|
||||||
|
lead := &Lead{
|
||||||
|
VersionMajor: int(b[4]),
|
||||||
|
VersionMinor: int(b[5]),
|
||||||
|
Type: int(binary.BigEndian.Uint16(b[6:8])),
|
||||||
|
Architecture: int(binary.BigEndian.Uint16(b[8:10])),
|
||||||
|
Name: string(b[10:76]),
|
||||||
|
OperatingSystem: int(binary.BigEndian.Uint16(b[76:78])),
|
||||||
|
SignatureType: int(binary.BigEndian.Uint16(b[78:80])),
|
||||||
|
}
|
||||||
|
|
||||||
|
// check version
|
||||||
|
if lead.VersionMajor < 3 || lead.VersionMajor > 4 {
|
||||||
|
return nil, ErrUnsupportedVersion
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: validate lead value ranges
|
||||||
|
|
||||||
|
return lead, nil
|
||||||
|
}
|
387
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/packagefile.go
generated
vendored
Normal file
387
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/packagefile.go
generated
vendored
Normal file
|
@ -0,0 +1,387 @@
|
||||||
|
package rpm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A PackageFile is an RPM package definition loaded directly from the package
// file itself.
type PackageFile struct {
	Lead    Lead    // deprecated lead section
	Headers Headers // Headers[0] is the signature header, Headers[1] the main header

	// file metadata recorded by OpenPackageFile; left at zero values when
	// the package is read from a plain stream via ReadPackageFile
	path     string
	fileSize uint64
	fileTime time.Time
}
|
||||||
|
|
||||||
|
// ReadPackageFile reads a rpm package file from a stream and returns a pointer
|
||||||
|
// to it.
|
||||||
|
func ReadPackageFile(r io.Reader) (*PackageFile, error) {
|
||||||
|
// See: http://www.rpm.org/max-rpm/s1-rpm-file-format-rpm-file-format.html
|
||||||
|
p := &PackageFile{}
|
||||||
|
|
||||||
|
// read the deprecated "lead"
|
||||||
|
lead, err := ReadPackageLead(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Lead = *lead
|
||||||
|
|
||||||
|
// read signature and header headers
|
||||||
|
offset := 96
|
||||||
|
p.Headers = make(Headers, 2)
|
||||||
|
for i := 0; i < 2; i++ {
|
||||||
|
// parse header
|
||||||
|
h, err := ReadPackageHeader(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("%v (v%d.%d)", err, lead.VersionMajor, lead.VersionMinor)
|
||||||
|
}
|
||||||
|
|
||||||
|
// set start and end offsets
|
||||||
|
h.Start = offset
|
||||||
|
h.End = h.Start + 16 + (16 * h.IndexCount) + h.Length
|
||||||
|
offset = h.End
|
||||||
|
|
||||||
|
// calculate location of the end of the header by padding to a multiple of 8
|
||||||
|
pad := 8 - int(math.Mod(float64(h.Length), 8))
|
||||||
|
if pad < 8 {
|
||||||
|
offset += pad
|
||||||
|
}
|
||||||
|
|
||||||
|
// append
|
||||||
|
p.Headers[i] = *h
|
||||||
|
}
|
||||||
|
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenPackageFile reads a rpm package from the file systems and returns a pointer
|
||||||
|
// to it.
|
||||||
|
func OpenPackageFile(path string) (*PackageFile, error) {
|
||||||
|
// stat file
|
||||||
|
fi, err := os.Stat(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// open file
|
||||||
|
f, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
// read package content
|
||||||
|
p, err := ReadPackageFile(f)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// set file info
|
||||||
|
p.path = path
|
||||||
|
p.fileSize = uint64(fi.Size())
|
||||||
|
p.fileTime = fi.ModTime()
|
||||||
|
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenPackageFiles reads all rpm packages with the .rpm suffix from the given
|
||||||
|
// directory on the file systems and returns a slice of pointers to the loaded
|
||||||
|
// packages.
|
||||||
|
func OpenPackageFiles(path string) ([]*PackageFile, error) {
|
||||||
|
// read directory
|
||||||
|
dir, err := ioutil.ReadDir(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// list *.rpm files
|
||||||
|
files := make([]string, 0)
|
||||||
|
for _, f := range dir {
|
||||||
|
if strings.HasSuffix(f.Name(), ".rpm") {
|
||||||
|
files = append(files, filepath.Join(path, f.Name()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// read packages
|
||||||
|
packages := make([]*PackageFile, len(files))
|
||||||
|
for i, f := range files {
|
||||||
|
p, err := OpenPackageFile(f)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
packages[i] = p
|
||||||
|
}
|
||||||
|
|
||||||
|
return packages, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// dependencies translates the given tag values into a slice of package
|
||||||
|
// relationships such as provides, conflicts, obsoletes and requires.
|
||||||
|
func (c *PackageFile) dependencies(nevrsTagId, flagsTagId, namesTagId, versionsTagId int) Dependencies {
|
||||||
|
// TODO: Implement NEVRS tags
|
||||||
|
|
||||||
|
flgs := c.Headers[1].Indexes.IntsByTag(flagsTagId)
|
||||||
|
names := c.Headers[1].Indexes.StringsByTag(namesTagId)
|
||||||
|
vers := c.Headers[1].Indexes.StringsByTag(versionsTagId)
|
||||||
|
|
||||||
|
deps := make(Dependencies, len(names))
|
||||||
|
for i := 0; i < len(names); i++ {
|
||||||
|
deps[i] = NewDependency(int(flgs[i]), names[i], 0, vers[i], "")
|
||||||
|
}
|
||||||
|
|
||||||
|
return deps
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the package identifier in the form
// '[name]-[version]-[release].[architecture]'.
func (c *PackageFile) String() string {
	return fmt.Sprintf("%s-%s-%s.%s", c.Name(), c.Version(), c.Release(), c.Architecture())
}

// Path returns the path which was given to open a package file if it was opened
// with OpenPackageFile. It is empty for packages read from a stream.
func (c *PackageFile) Path() string {
	return c.path
}

// FileTime returns the time at which the RPM was last modified if known.
// The zero time is returned for packages read from a stream.
func (c *PackageFile) FileTime() time.Time {
	return c.fileTime
}

// FileSize returns the size of the package file in bytes, or 0 for packages
// read from a stream.
func (c *PackageFile) FileSize() uint64 {
	return c.fileSize
}
|
||||||
|
|
||||||
|
// Checksum computes and returns the SHA256 checksum (encoded in hexidecimal) of
|
||||||
|
// the package file.
|
||||||
|
func (c *PackageFile) Checksum() (string, error) {
|
||||||
|
if c.Path() == "" {
|
||||||
|
return "", fmt.Errorf("File not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
if f, err := os.Open(c.Path()); err != nil {
|
||||||
|
return "", err
|
||||||
|
} else {
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
s := sha256.New()
|
||||||
|
if _, err := io.Copy(s, f); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return hex.EncodeToString(s.Sum(nil)), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChecksumType returns "sha256"
func (c *PackageFile) ChecksumType() string {
	return "sha256"
}

// HeaderStart returns the byte offset within the package file at which the
// main (second) header begins.
func (c *PackageFile) HeaderStart() uint64 {
	return uint64(c.Headers[1].Start)
}

// HeaderEnd returns the byte offset within the package file at which the
// main (second) header ends.
func (c *PackageFile) HeaderEnd() uint64 {
	return uint64(c.Headers[1].End)
}
|
||||||
|
|
||||||
|
// For tag definitions, see:
// https://github.com/rpm-software-management/rpm/blob/master/lib/rpmtag.h#L61

// Name returns the package name (RPMTAG_NAME, 1000).
func (c *PackageFile) Name() string {
	return c.Headers[1].Indexes.StringByTag(1000)
}

// Version returns the package version (RPMTAG_VERSION, 1001).
func (c *PackageFile) Version() string {
	return c.Headers[1].Indexes.StringByTag(1001)
}

// Release returns the package release (RPMTAG_RELEASE, 1002).
func (c *PackageFile) Release() string {
	return c.Headers[1].Indexes.StringByTag(1002)
}

// Epoch returns the package epoch (RPMTAG_EPOCH, 1003).
func (c *PackageFile) Epoch() int {
	return int(c.Headers[1].Indexes.IntByTag(1003))
}

// Requires returns the dependencies this package requires.
// Tag IDs are NEVRS/flags/names/versions — presumably the REQUIRE* tag family.
func (c *PackageFile) Requires() Dependencies {
	return c.dependencies(5041, 1048, 1049, 1050)
}

// Provides returns the dependencies this package provides.
func (c *PackageFile) Provides() Dependencies {
	return c.dependencies(5042, 1112, 1047, 1113)
}

// Conflicts returns the dependencies this package conflicts with.
func (c *PackageFile) Conflicts() Dependencies {
	return c.dependencies(5044, 1053, 1054, 1055)
}

// Obsoletes returns the dependencies this package obsoletes.
func (c *PackageFile) Obsoletes() Dependencies {
	return c.dependencies(5043, 1114, 1090, 1115)
}
|
||||||
|
|
||||||
|
// Files returns file information for each file that is installed by this RPM
// package.
//
// NOTE(review): the tag arrays below are assumed to be parallel (equal length)
// and every directory index in range; a malformed package would panic when
// indexed below — confirm whether defensive bounds checks are needed.
func (c *PackageFile) Files() []FileInfo {
	ixs := c.Headers[1].Indexes.IntsByTag(1116)      // directory index for each file
	names := c.Headers[1].Indexes.StringsByTag(1117) // file base names
	dirs := c.Headers[1].Indexes.StringsByTag(1118)  // directory names
	modes := c.Headers[1].Indexes.IntsByTag(1030)    // file modes
	sizes := c.Headers[1].Indexes.IntsByTag(1028)    // file sizes (RPMTAG_FILESIZES)
	times := c.Headers[1].Indexes.IntsByTag(1034)    // modification times (epoch seconds)
	owners := c.Headers[1].Indexes.StringsByTag(1039)
	groups := c.Headers[1].Indexes.StringsByTag(1040)

	files := make([]FileInfo, len(names))
	for i := 0; i < len(names); i++ {
		files[i] = FileInfo{
			name:    dirs[ixs[i]] + names[i], // full path = directory + base name
			mode:    os.FileMode(modes[i]),
			size:    sizes[i],
			modTime: time.Unix(times[i], 0),
			owner:   owners[i],
			group:   groups[i],
		}
	}

	return files
}
|
||||||
|
|
||||||
|
// Summary returns the package summary lines joined with newlines
// (RPMTAG_SUMMARY, 1004).
func (c *PackageFile) Summary() string {
	return strings.Join(c.Headers[1].Indexes.StringsByTag(1004), "\n")
}

// Description returns the package description (RPMTAG_DESCRIPTION, 1005).
func (c *PackageFile) Description() string {
	return strings.Join(c.Headers[1].Indexes.StringsByTag(1005), "\n")
}

// BuildTime returns the time the package was built (RPMTAG_BUILDTIME, 1006).
func (c *PackageFile) BuildTime() time.Time {
	return c.Headers[1].Indexes.TimeByTag(1006)
}

// BuildHost returns the host the package was built on (RPMTAG_BUILDHOST, 1007).
func (c *PackageFile) BuildHost() string {
	return c.Headers[1].Indexes.StringByTag(1007)
}

// InstallTime returns the package install time (RPMTAG_INSTALLTIME, 1008).
func (c *PackageFile) InstallTime() time.Time {
	return c.Headers[1].Indexes.TimeByTag(1008)
}
|
||||||
|
|
||||||
|
// Size specifies the disk space consumed by installation of the package
// (RPMTAG_SIZE, 1009).
func (c *PackageFile) Size() uint64 {
	return uint64(c.Headers[1].Indexes.IntByTag(1009))
}

// ArchiveSize specifies the size of the archived payload of the package in
// bytes. The signature header value (RPMSIGTAG_PAYLOADSIZE, 1007) is
// preferred; main header tag 1046 is used as a fallback when it is absent.
func (c *PackageFile) ArchiveSize() uint64 {
	if i := uint64(c.Headers[0].Indexes.IntByTag(1007)); i > 0 {
		return i
	}

	return uint64(c.Headers[1].Indexes.IntByTag(1046))
}
|
||||||
|
|
||||||
|
// Distribution returns the package distribution (RPMTAG_DISTRIBUTION, 1010).
func (c *PackageFile) Distribution() string {
	return c.Headers[1].Indexes.StringByTag(1010)
}

// Vendor returns the package vendor (RPMTAG_VENDOR, 1011).
func (c *PackageFile) Vendor() string {
	return c.Headers[1].Indexes.StringByTag(1011)
}

// GIFImage returns the raw GIF image stored in the package (RPMTAG_GIF, 1012).
func (c *PackageFile) GIFImage() []byte {
	return c.Headers[1].Indexes.BytesByTag(1012)
}

// XPMImage returns the raw XPM image stored in the package (RPMTAG_XPM, 1013).
func (c *PackageFile) XPMImage() []byte {
	return c.Headers[1].Indexes.BytesByTag(1013)
}

// License returns the package license (RPMTAG_LICENSE, 1014).
func (c *PackageFile) License() string {
	return c.Headers[1].Indexes.StringByTag(1014)
}

// Packager returns the package packager (RPMTAG_PACKAGER, 1015).
func (c *PackageFile) Packager() string {
	return c.Headers[1].Indexes.StringByTag(1015)
}

// Groups returns the package groups (RPMTAG_GROUP, 1016).
func (c *PackageFile) Groups() []string {
	return c.Headers[1].Indexes.StringsByTag(1016)
}

// ChangeLog returns the package changelog entries (RPMTAG_CHANGELOG, 1017).
func (c *PackageFile) ChangeLog() []string {
	return c.Headers[1].Indexes.StringsByTag(1017)
}

// Source returns the package source entries (RPMTAG_SOURCE, 1018).
func (c *PackageFile) Source() []string {
	return c.Headers[1].Indexes.StringsByTag(1018)
}

// Patch returns the package patch entries (RPMTAG_PATCH, 1019).
func (c *PackageFile) Patch() []string {
	return c.Headers[1].Indexes.StringsByTag(1019)
}

// URL returns the package URL (RPMTAG_URL, 1020).
func (c *PackageFile) URL() string {
	return c.Headers[1].Indexes.StringByTag(1020)
}

// OperatingSystem returns the package operating system (RPMTAG_OS, 1021).
func (c *PackageFile) OperatingSystem() string {
	return c.Headers[1].Indexes.StringByTag(1021)
}

// Architecture returns the package architecture (RPMTAG_ARCH, 1022).
func (c *PackageFile) Architecture() string {
	return c.Headers[1].Indexes.StringByTag(1022)
}

// PreInstallScript returns the pre-install script (RPMTAG_PREIN, 1023).
func (c *PackageFile) PreInstallScript() string {
	return c.Headers[1].Indexes.StringByTag(1023)
}

// PostInstallScript returns the post-install script (RPMTAG_POSTIN, 1024).
func (c *PackageFile) PostInstallScript() string {
	return c.Headers[1].Indexes.StringByTag(1024)
}

// PreUninstallScript returns the pre-uninstall script (RPMTAG_PREUN, 1025).
func (c *PackageFile) PreUninstallScript() string {
	return c.Headers[1].Indexes.StringByTag(1025)
}

// PostUninstallScript returns the post-uninstall script (RPMTAG_POSTUN, 1026).
func (c *PackageFile) PostUninstallScript() string {
	return c.Headers[1].Indexes.StringByTag(1026)
}

// OldFilenames returns the package's legacy file name list
// (RPMTAG_OLDFILENAMES, 1027).
func (c *PackageFile) OldFilenames() []string {
	return c.Headers[1].Indexes.StringsByTag(1027)
}

// Icon returns the raw package icon (tag 1043).
func (c *PackageFile) Icon() []byte {
	return c.Headers[1].Indexes.BytesByTag(1043)
}

// SourceRPM returns the name of the source RPM (tag 1044).
func (c *PackageFile) SourceRPM() string {
	return c.Headers[1].Indexes.StringByTag(1044)
}

// RPMVersion returns the version of RPM used to build the package (tag 1064).
func (c *PackageFile) RPMVersion() string {
	return c.Headers[1].Indexes.StringByTag(1064)
}

// Platform returns the package platform string (tag 1132).
func (c *PackageFile) Platform() string {
	return c.Headers[1].Indexes.StringByTag(1132)
}
|
355
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/tags.go
generated
vendored
Normal file
355
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/tags.go
generated
vendored
Normal file
|
@ -0,0 +1,355 @@
|
||||||
|
package rpm
|
||||||
|
|
||||||
|
// See:
|
||||||
|
// https://github.com/rpm-software-management/rpm/blob/master/lib/rpmtag.h
|
||||||
|
|
||||||
|
// Tags for the signature header.
//
// Several signature tags are aliases of the corresponding RPMTAG_* constants
// declared in the header tag block below.
const (
	RPMSIGTAG_SIZE            = 1000
	RPMSIGTAG_LEMD5_1         = 1001
	RPMSIGTAG_PGP             = 1002
	RPMSIGTAG_LEMD5_2         = 1003
	RPMSIGTAG_MD5             = 1004
	RPMSIGTAG_GPG             = 1005
	RPMSIGTAG_PGP5            = 1006
	RPMSIGTAG_PAYLOADSIZE     = 1007
	RPMSIGTAG_RESERVEDSPACE   = 1008
	RPMSIGTAG_BADSHA1_1       = RPMTAG_BADSHA1_1
	RPMSIGTAG_BADSHA1_2       = RPMTAG_BADSHA1_2
	RPMSIGTAG_SHA1            = RPMTAG_SHA1HEADER
	RPMSIGTAG_DSA             = RPMTAG_DSAHEADER
	RPMSIGTAG_RSA             = RPMTAG_RSAHEADER
	RPMSIGTAG_LONGSIZE        = RPMTAG_LONGSIGSIZE
	RPMSIGTAG_LONGARCHIVESIZE = RPMTAG_LONGARCHIVESIZE
)
|
||||||
|
|
||||||
|
// Tags for the header header.
const (
	// RPMTAG_NOT_FOUND is a sentinel for lookups on a tag that is absent.
	RPMTAG_NOT_FOUND = -1

	// Header-structure tags.
	RPMTAG_HEADERIMAGE = 61
	RPMTAG_HEADERSIGNATURES = 62
	RPMTAG_HEADERIMMUTABLE = 63
	RPMTAG_HEADERREGIONS = 64
	RPMTAG_HEADERI18NTABLE = 100

	// Signature tags as mirrored into the header tag space.
	RPMTAG_SIG_BASE = 256
	RPMTAG_SIGSIZE = 257
	RPMTAG_SIGLEMD5_1 = 258
	RPMTAG_SIGPGP = 259
	RPMTAG_SIGLEMD5_2 = 260
	RPMTAG_SIGMD5 = 261
	RPMTAG_PKGID = RPMTAG_SIGMD5
	RPMTAG_SIGGPG = 262
	RPMTAG_SIGPGP5 = 263

	RPMTAG_BADSHA1_1 = 264
	RPMTAG_BADSHA1_2 = 265
	RPMTAG_PUBKEYS = 266
	RPMTAG_DSAHEADER = 267
	RPMTAG_RSAHEADER = 268
	RPMTAG_SHA1HEADER = 269
	RPMTAG_HDRID = RPMTAG_SHA1HEADER
	RPMTAG_LONGSIGSIZE = 270
	RPMTAG_LONGARCHIVESIZE = 271

	// General package metadata tags (1000 series). Single-letter names
	// (N, V, R, E, ...) are aliases for their long forms.
	RPMTAG_NAME = 1000
	RPMTAG_N = RPMTAG_NAME
	RPMTAG_VERSION = 1001
	RPMTAG_V = RPMTAG_VERSION
	RPMTAG_RELEASE = 1002
	RPMTAG_R = RPMTAG_RELEASE
	RPMTAG_EPOCH = 1003
	RPMTAG_E = RPMTAG_EPOCH
	RPMTAG_SUMMARY = 1004
	RPMTAG_DESCRIPTION = 1005
	RPMTAG_BUILDTIME = 1006
	RPMTAG_BUILDHOST = 1007
	RPMTAG_INSTALLTIME = 1008
	RPMTAG_SIZE = 1009
	RPMTAG_DISTRIBUTION = 1010
	RPMTAG_VENDOR = 1011
	RPMTAG_GIF = 1012
	RPMTAG_XPM = 1013
	RPMTAG_LICENSE = 1014
	RPMTAG_PACKAGER = 1015
	RPMTAG_GROUP = 1016
	RPMTAG_CHANGELOG = 1017
	RPMTAG_SOURCE = 1018
	RPMTAG_PATCH = 1019
	RPMTAG_URL = 1020
	RPMTAG_OS = 1021
	RPMTAG_ARCH = 1022
	RPMTAG_PREIN = 1023
	RPMTAG_POSTIN = 1024
	RPMTAG_PREUN = 1025
	RPMTAG_POSTUN = 1026
	RPMTAG_OLDFILENAMES = 1027
	RPMTAG_FILESIZES = 1028
	RPMTAG_FILESTATES = 1029
	RPMTAG_FILEMODES = 1030
	RPMTAG_FILEUIDS = 1031
	RPMTAG_FILEGIDS = 1032
	RPMTAG_FILERDEVS = 1033
	RPMTAG_FILEMTIMES = 1034
	RPMTAG_FILEDIGESTS = 1035
	RPMTAG_FILEMD5S = RPMTAG_FILEDIGESTS
	RPMTAG_FILELINKTOS = 1036
	RPMTAG_FILEFLAGS = 1037
	RPMTAG_ROOT = 1038
	RPMTAG_FILEUSERNAME = 1039
	RPMTAG_FILEGROUPNAME = 1040
	RPMTAG_EXCLUDE = 1041
	RPMTAG_EXCLUSIVE = 1042
	RPMTAG_ICON = 1043
	RPMTAG_SOURCERPM = 1044
	RPMTAG_FILEVERIFYFLAGS = 1045
	RPMTAG_ARCHIVESIZE = 1046
	RPMTAG_PROVIDENAME = 1047
	RPMTAG_PROVIDES = RPMTAG_PROVIDENAME
	RPMTAG_P = RPMTAG_PROVIDENAME
	RPMTAG_REQUIREFLAGS = 1048
	RPMTAG_REQUIRENAME = 1049
	RPMTAG_REQUIRES = RPMTAG_REQUIRENAME
	RPMTAG_REQUIREVERSION = 1050
	RPMTAG_NOSOURCE = 1051
	RPMTAG_NOPATCH = 1052
	RPMTAG_CONFLICTFLAGS = 1053
	RPMTAG_CONFLICTNAME = 1054
	RPMTAG_CONFLICTS = RPMTAG_CONFLICTNAME
	RPMTAG_C = RPMTAG_CONFLICTNAME
	RPMTAG_CONFLICTVERSION = 1055
	RPMTAG_DEFAULTPREFIX = 1056
	RPMTAG_BUILDROOT = 1057
	RPMTAG_INSTALLPREFIX = 1058
	RPMTAG_EXCLUDEARCH = 1059
	RPMTAG_EXCLUDEOS = 1060
	RPMTAG_EXCLUSIVEARCH = 1061
	RPMTAG_EXCLUSIVEOS = 1062
	RPMTAG_AUTOREQPROV = 1063
	RPMTAG_RPMVERSION = 1064
	RPMTAG_TRIGGERSCRIPTS = 1065
	RPMTAG_TRIGGERNAME = 1066
	RPMTAG_TRIGGERVERSION = 1067
	RPMTAG_TRIGGERFLAGS = 1068
	RPMTAG_TRIGGERINDEX = 1069
	// NOTE: 1070-1078 are intentionally absent here; the values jump to
	// 1079 as in the upstream rpmtag.h.
	RPMTAG_VERIFYSCRIPT = 1079
	RPMTAG_CHANGELOGTIME = 1080
	RPMTAG_CHANGELOGNAME = 1081
	RPMTAG_CHANGELOGTEXT = 1082
	RPMTAG_BROKENMD5 = 1083
	RPMTAG_PREREQ = 1084
	RPMTAG_PREINPROG = 1085
	RPMTAG_POSTINPROG = 1086
	RPMTAG_PREUNPROG = 1087
	RPMTAG_POSTUNPROG = 1088
	RPMTAG_BUILDARCHS = 1089
	RPMTAG_OBSOLETENAME = 1090
	RPMTAG_OBSOLETES = RPMTAG_OBSOLETENAME
	RPMTAG_O = RPMTAG_OBSOLETENAME
	RPMTAG_VERIFYSCRIPTPROG = 1091
	RPMTAG_TRIGGERSCRIPTPROG = 1092
	RPMTAG_DOCDIR = 1093
	RPMTAG_COOKIE = 1094
	RPMTAG_FILEDEVICES = 1095
	RPMTAG_FILEINODES = 1096
	RPMTAG_FILELANGS = 1097
	RPMTAG_PREFIXES = 1098
	RPMTAG_INSTPREFIXES = 1099
	RPMTAG_TRIGGERIN = 1100
	RPMTAG_TRIGGERUN = 1101
	RPMTAG_TRIGGERPOSTUN = 1102
	RPMTAG_AUTOREQ = 1103
	RPMTAG_AUTOPROV = 1104
	RPMTAG_CAPABILITY = 1105
	RPMTAG_SOURCEPACKAGE = 1106
	RPMTAG_OLDORIGFILENAMES = 1107
	RPMTAG_BUILDPREREQ = 1108
	RPMTAG_BUILDREQUIRES = 1109
	RPMTAG_BUILDCONFLICTS = 1110
	RPMTAG_BUILDMACROS = 1111
	RPMTAG_PROVIDEFLAGS = 1112
	RPMTAG_PROVIDEVERSION = 1113
	RPMTAG_OBSOLETEFLAGS = 1114
	RPMTAG_OBSOLETEVERSION = 1115
	RPMTAG_DIRINDEXES = 1116
	RPMTAG_BASENAMES = 1117
	RPMTAG_DIRNAMES = 1118
	RPMTAG_ORIGDIRINDEXES = 1119
	RPMTAG_ORIGBASENAMES = 1120
	RPMTAG_ORIGDIRNAMES = 1121
	RPMTAG_OPTFLAGS = 1122
	RPMTAG_DISTURL = 1123
	RPMTAG_PAYLOADFORMAT = 1124
	RPMTAG_PAYLOADCOMPRESSOR = 1125
	RPMTAG_PAYLOADFLAGS = 1126
	RPMTAG_INSTALLCOLOR = 1127
	RPMTAG_INSTALLTID = 1128
	RPMTAG_REMOVETID = 1129
	RPMTAG_SHA1RHN = 1130
	RPMTAG_RHNPLATFORM = 1131
	RPMTAG_PLATFORM = 1132
	RPMTAG_PATCHESNAME = 1133
	RPMTAG_PATCHESFLAGS = 1134
	RPMTAG_PATCHESVERSION = 1135
	RPMTAG_CACHECTIME = 1136
	RPMTAG_CACHEPKGPATH = 1137
	RPMTAG_CACHEPKGSIZE = 1138
	RPMTAG_CACHEPKGMTIME = 1139
	RPMTAG_FILECOLORS = 1140
	RPMTAG_FILECLASS = 1141
	RPMTAG_CLASSDICT = 1142
	RPMTAG_FILEDEPENDSX = 1143
	RPMTAG_FILEDEPENDSN = 1144
	RPMTAG_DEPENDSDICT = 1145
	RPMTAG_SOURCEPKGID = 1146
	RPMTAG_FILECONTEXTS = 1147
	RPMTAG_FSCONTEXTS = 1148
	RPMTAG_RECONTEXTS = 1149
	RPMTAG_POLICIES = 1150
	RPMTAG_PRETRANS = 1151
	RPMTAG_POSTTRANS = 1152
	RPMTAG_PRETRANSPROG = 1153
	RPMTAG_POSTTRANSPROG = 1154
	RPMTAG_DISTTAG = 1155
	RPMTAG_OLDSUGGESTSNAME = 1156
	RPMTAG_OLDSUGGESTS = RPMTAG_OLDSUGGESTSNAME
	RPMTAG_OLDSUGGESTSVERSION = 1157
	RPMTAG_OLDSUGGESTSFLAGS = 1158
	RPMTAG_OLDENHANCESNAME = 1159
	RPMTAG_OLDENHANCES = RPMTAG_OLDENHANCESNAME
	RPMTAG_OLDENHANCESVERSION = 1160
	RPMTAG_OLDENHANCESFLAGS = 1161
	RPMTAG_PRIORITY = 1162
	RPMTAG_CVSID = 1163
	RPMTAG_SVNID = RPMTAG_CVSID
	RPMTAG_BLINKPKGID = 1164
	RPMTAG_BLINKHDRID = 1165
	RPMTAG_BLINKNEVRA = 1166
	RPMTAG_FLINKPKGID = 1167
	RPMTAG_FLINKHDRID = 1168
	RPMTAG_FLINKNEVRA = 1169
	RPMTAG_PACKAGEORIGIN = 1170
	RPMTAG_TRIGGERPREIN = 1171
	RPMTAG_BUILDSUGGESTS = 1172
	RPMTAG_BUILDENHANCES = 1173
	RPMTAG_SCRIPTSTATES = 1174
	RPMTAG_SCRIPTMETRICS = 1175
	RPMTAG_BUILDCPUCLOCK = 1176
	RPMTAG_FILEDIGESTALGOS = 1177
	RPMTAG_VARIANTS = 1178
	RPMTAG_XMAJOR = 1179
	RPMTAG_XMINOR = 1180
	RPMTAG_REPOTAG = 1181
	RPMTAG_KEYWORDS = 1182
	RPMTAG_BUILDPLATFORMS = 1183
	RPMTAG_PACKAGECOLOR = 1184
	RPMTAG_PACKAGEPREFCOLOR = 1185
	RPMTAG_XATTRSDICT = 1186
	RPMTAG_FILEXATTRSX = 1187
	RPMTAG_DEPATTRSDICT = 1188
	RPMTAG_CONFLICTATTRSX = 1189
	RPMTAG_OBSOLETEATTRSX = 1190
	RPMTAG_PROVIDEATTRSX = 1191
	RPMTAG_REQUIREATTRSX = 1192
	RPMTAG_BUILDPROVIDES = 1193
	RPMTAG_BUILDOBSOLETES = 1194
	RPMTAG_DBINSTANCE = 1195
	RPMTAG_NVRA = 1196

	// Extension/derived tags (5000 series).
	RPMTAG_FILENAMES = 5000
	RPMTAG_FILEPROVIDE = 5001
	RPMTAG_FILEREQUIRE = 5002
	RPMTAG_FSNAMES = 5003
	RPMTAG_FSSIZES = 5004
	RPMTAG_TRIGGERCONDS = 5005
	RPMTAG_TRIGGERTYPE = 5006
	RPMTAG_ORIGFILENAMES = 5007
	RPMTAG_LONGFILESIZES = 5008
	RPMTAG_LONGSIZE = 5009
	RPMTAG_FILECAPS = 5010
	RPMTAG_FILEDIGESTALGO = 5011
	RPMTAG_BUGURL = 5012
	RPMTAG_EVR = 5013
	RPMTAG_NVR = 5014
	RPMTAG_NEVR = 5015
	RPMTAG_NEVRA = 5016
	RPMTAG_HEADERCOLOR = 5017
	RPMTAG_VERBOSE = 5018
	RPMTAG_EPOCHNUM = 5019
	RPMTAG_PREINFLAGS = 5020
	RPMTAG_POSTINFLAGS = 5021
	RPMTAG_PREUNFLAGS = 5022
	RPMTAG_POSTUNFLAGS = 5023
	RPMTAG_PRETRANSFLAGS = 5024
	RPMTAG_POSTTRANSFLAGS = 5025
	RPMTAG_VERIFYSCRIPTFLAGS = 5026
	RPMTAG_TRIGGERSCRIPTFLAGS = 5027
	// NOTE: 5028 is intentionally absent, as in the upstream rpmtag.h.
	RPMTAG_COLLECTIONS = 5029
	RPMTAG_POLICYNAMES = 5030
	RPMTAG_POLICYTYPES = 5031
	RPMTAG_POLICYTYPESINDEXES = 5032
	RPMTAG_POLICYFLAGS = 5033
	RPMTAG_VCS = 5034
	RPMTAG_ORDERNAME = 5035
	RPMTAG_ORDERVERSION = 5036
	RPMTAG_ORDERFLAGS = 5037
	RPMTAG_MSSFMANIFEST = 5038
	RPMTAG_MSSFDOMAIN = 5039
	RPMTAG_INSTFILENAMES = 5040
	RPMTAG_REQUIRENEVRS = 5041
	RPMTAG_PROVIDENEVRS = 5042
	RPMTAG_OBSOLETENEVRS = 5043
	RPMTAG_CONFLICTNEVRS = 5044
	RPMTAG_FILENLINKS = 5045
	RPMTAG_RECOMMENDNAME = 5046
	RPMTAG_RECOMMENDS = RPMTAG_RECOMMENDNAME
	RPMTAG_RECOMMENDVERSION = 5047
	RPMTAG_RECOMMENDFLAGS = 5048
	RPMTAG_SUGGESTNAME = 5049
	RPMTAG_SUGGESTS = RPMTAG_SUGGESTNAME
	RPMTAG_SUGGESTVERSION = 5050
	RPMTAG_SUGGESTFLAGS = 5051
	RPMTAG_SUPPLEMENTNAME = 5052
	RPMTAG_SUPPLEMENTS = RPMTAG_SUPPLEMENTNAME
	RPMTAG_SUPPLEMENTVERSION = 5053
	RPMTAG_SUPPLEMENTFLAGS = 5054
	RPMTAG_ENHANCENAME = 5055
	RPMTAG_ENHANCES = RPMTAG_ENHANCENAME
	RPMTAG_ENHANCEVERSION = 5056
	RPMTAG_ENHANCEFLAGS = 5057
	RPMTAG_RECOMMENDNEVRS = 5058
	RPMTAG_SUGGESTNEVRS = 5059
	RPMTAG_SUPPLEMENTNEVRS = 5060
	RPMTAG_ENHANCENEVRS = 5061
	RPMTAG_ENCODING = 5062
	RPMTAG_FILETRIGGERIN = 5063
	RPMTAG_FILETRIGGERUN = 5064
	RPMTAG_FILETRIGGERPOSTUN = 5065
	RPMTAG_FILETRIGGERSCRIPTS = 5066
	RPMTAG_FILETRIGGERSCRIPTPROG = 5067
	RPMTAG_FILETRIGGERSCRIPTFLAGS = 5068
	RPMTAG_FILETRIGGERNAME = 5069
	RPMTAG_FILETRIGGERINDEX = 5070
	RPMTAG_FILETRIGGERVERSION = 5071
	RPMTAG_FILETRIGGERFLAGS = 5072
	RPMTAG_TRANSFILETRIGGERIN = 5073
	RPMTAG_TRANSFILETRIGGERUN = 5074
	RPMTAG_TRANSFILETRIGGERPOSTUN = 5075
	RPMTAG_TRANSFILETRIGGERSCRIPTS = 5076
	RPMTAG_TRANSFILETRIGGERSCRIPTPROG = 5077
	RPMTAG_TRANSFILETRIGGERSCRIPTFLAGS = 5078
	RPMTAG_TRANSFILETRIGGERNAME = 5079
	RPMTAG_TRANSFILETRIGGERINDEX = 5080
	RPMTAG_TRANSFILETRIGGERVERSION = 5081
	RPMTAG_TRANSFILETRIGGERFLAGS = 5082
	RPMTAG_REMOVEPATHPOSTFIXES = 5083
	RPMTAG_FILETRIGGERPRIORITIES = 5084
	RPMTAG_TRANSFILETRIGGERPRIORITIES = 5085
	RPMTAG_FILETRIGGERCONDS = 5086
	RPMTAG_FILETRIGGERTYPE = 5087
	RPMTAG_TRANSFILETRIGGERCONDS = 5088
	RPMTAG_TRANSFILETRIGGERTYPE = 5089
	RPMTAG_FILESIGNATURES = 5090
	RPMTAG_FILESIGNATURELENGTH = 5091
)
|
138
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/version.go
generated
vendored
Normal file
138
vendor/github.com/elastic/beats/dev-tools/vendor/github.com/cavaliercoder/go-rpm/version.go
generated
vendored
Normal file
|
@ -0,0 +1,138 @@
|
||||||
|
package rpm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
)
|
||||||
|
|
||||||
|
// alphanumPattern matches one token of a version string: either a run of
// ASCII letters or a run of ASCII digits. rpmvercmp uses it to tokenize
// version and release strings; everything else acts only as a separator.
var alphanumPattern = regexp.MustCompile("([a-zA-Z]+)|([0-9]+)")
|
||||||
|
|
||||||
|
// PackageVersion is an interface which holds version information for a single
// package version. Epoch, Version and Release together determine ordering
// (see VersionCompare); Name is carried alongside but is not consulted when
// comparing.
type PackageVersion interface {
	// Name returns the package name.
	Name() string

	// Version returns the package version string.
	Version() string

	// Release returns the package release string.
	Release() string

	// Epoch returns the package epoch.
	Epoch() int
}
|
||||||
|
|
||||||
|
// VersionCompare compares the version details of two packages. Versions are
|
||||||
|
// compared by Epoch, Version and Release in descending order of precedence.
|
||||||
|
//
|
||||||
|
// If a is more recent than b, 1 is returned. If a is less recent than b, -1 is
|
||||||
|
// returned. If a and b are equal, 0 is returned.
|
||||||
|
//
|
||||||
|
// This function does not consider if the two packages have the same name or if
|
||||||
|
// either package has been made obsolete by the other.
|
||||||
|
func VersionCompare(a PackageVersion, b PackageVersion) int {
|
||||||
|
// compare nils
|
||||||
|
if a == nil && b == nil {
|
||||||
|
return 0
|
||||||
|
} else if a == nil {
|
||||||
|
return -1
|
||||||
|
} else if b == nil {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// compare epoch
|
||||||
|
ae := a.Epoch()
|
||||||
|
be := b.Epoch()
|
||||||
|
if ae != be {
|
||||||
|
if ae > be {
|
||||||
|
return 1
|
||||||
|
} else {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// compare version
|
||||||
|
if rc := rpmvercmp(a.Version(), b.Version()); rc != 0 {
|
||||||
|
return rc
|
||||||
|
}
|
||||||
|
|
||||||
|
// compare release
|
||||||
|
return rpmvercmp(a.Release(), b.Release())
|
||||||
|
}
|
||||||
|
|
||||||
|
// LatestPackage returns the package with the highest version in the given slice
|
||||||
|
// of PackageVersions.
|
||||||
|
func LatestPackage(v ...PackageVersion) PackageVersion {
|
||||||
|
var latest PackageVersion
|
||||||
|
for _, p := range v {
|
||||||
|
if 1 == VersionCompare(p, latest) {
|
||||||
|
latest = p
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return latest
|
||||||
|
}
|
||||||
|
|
||||||
|
// rpmcmpver compares two version or release strings.
|
||||||
|
//
|
||||||
|
// For the original C implementation, see:
|
||||||
|
// https://github.com/rpm-software-management/rpm/blob/master/lib/rpmvercmp.c#L16
|
||||||
|
func rpmvercmp(a, b string) int {
|
||||||
|
// shortcut for equality
|
||||||
|
if a == b {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// get alpha/numeric segements
|
||||||
|
segsa := alphanumPattern.FindAllString(a, -1)
|
||||||
|
segsb := alphanumPattern.FindAllString(b, -1)
|
||||||
|
segs := int(math.Min(float64(len(segsa)), float64(len(segsb))))
|
||||||
|
|
||||||
|
// TODO: handle tildes in rpmvercmp
|
||||||
|
|
||||||
|
// compare each segment
|
||||||
|
for i := 0; i < segs; i++ {
|
||||||
|
a := segsa[i]
|
||||||
|
b := segsb[i]
|
||||||
|
|
||||||
|
if unicode.IsNumber([]rune(a)[0]) {
|
||||||
|
// numbers are always greater than alphas
|
||||||
|
if !unicode.IsNumber([]rune(b)[0]) {
|
||||||
|
// a is numeric, b is alpha
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// trim leading zeros
|
||||||
|
a = strings.TrimLeft(a, "0")
|
||||||
|
b = strings.TrimLeft(b, "0")
|
||||||
|
|
||||||
|
// longest string wins without further comparison
|
||||||
|
if len(a) > len(b) {
|
||||||
|
return 1
|
||||||
|
} else if len(b) > len(a) {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
} else if unicode.IsNumber([]rune(b)[0]) {
|
||||||
|
// a is alpha, b is numeric
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// string compare
|
||||||
|
if a < b {
|
||||||
|
return -1
|
||||||
|
} else if a > b {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// segments were all the same but separators must have been different
|
||||||
|
if len(segsa) == len(segsb) {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// whoever has the most segments wins
|
||||||
|
if len(segsa) > len(segsb) {
|
||||||
|
return 1
|
||||||
|
} else {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
}
|
3
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/AUTHORS
generated
vendored
Normal file
3
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/AUTHORS
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
# This source code refers to The Go Authors for copyright purposes.
|
||||||
|
# The master list of authors is in the main Go distribution,
|
||||||
|
# visible at http://tip.golang.org/AUTHORS.
|
31
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/CONTRIBUTING.md
generated
vendored
Normal file
31
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/CONTRIBUTING.md
generated
vendored
Normal file
|
@ -0,0 +1,31 @@
|
||||||
|
# Contributing to Go
|
||||||
|
|
||||||
|
Go is an open source project.
|
||||||
|
|
||||||
|
It is the work of hundreds of contributors. We appreciate your help!
|
||||||
|
|
||||||
|
|
||||||
|
## Filing issues
|
||||||
|
|
||||||
|
When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions:
|
||||||
|
|
||||||
|
1. What version of Go are you using (`go version`)?
|
||||||
|
2. What operating system and processor architecture are you using?
|
||||||
|
3. What did you do?
|
||||||
|
4. What did you expect to see?
|
||||||
|
5. What did you see instead?
|
||||||
|
|
||||||
|
General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
|
||||||
|
The gophers there will answer or ask you to file an issue if you've tripped over a bug.
|
||||||
|
|
||||||
|
## Contributing code
|
||||||
|
|
||||||
|
Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
|
||||||
|
before sending patches.
|
||||||
|
|
||||||
|
**We do not accept GitHub pull requests**
|
||||||
|
(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
|
||||||
|
|
||||||
|
Unless otherwise noted, the Go source files are distributed under
|
||||||
|
the BSD-style license found in the LICENSE file.
|
||||||
|
|
3
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/CONTRIBUTORS
generated
vendored
Normal file
3
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/CONTRIBUTORS
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
# This source code was written by the Go contributors.
|
||||||
|
# The master list of contributors is in the main Go distribution,
|
||||||
|
# visible at http://tip.golang.org/CONTRIBUTORS.
|
|
@ -1,16 +1,16 @@
|
||||||
Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
Redistribution and use in source and binary forms, with or without
|
||||||
modification, are permitted provided that the following conditions are
|
modification, are permitted provided that the following conditions are
|
||||||
met:
|
met:
|
||||||
|
|
||||||
* Redistributions of source code must retain the above copyright
|
* Redistributions of source code must retain the above copyright
|
||||||
notice, this list of conditions and the following disclaimer.
|
notice, this list of conditions and the following disclaimer.
|
||||||
* Redistributions in binary form must reproduce the above
|
* Redistributions in binary form must reproduce the above
|
||||||
copyright notice, this list of conditions and the following disclaimer
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
in the documentation and/or other materials provided with the
|
in the documentation and/or other materials provided with the
|
||||||
distribution.
|
distribution.
|
||||||
* Neither the name of Google Inc. nor the names of its
|
* Neither the name of Google Inc. nor the names of its
|
||||||
contributors may be used to endorse or promote products derived from
|
contributors may be used to endorse or promote products derived from
|
||||||
this software without specific prior written permission.
|
this software without specific prior written permission.
|
||||||
|
|
22
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/PATENTS
generated
vendored
Normal file
22
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/PATENTS
generated
vendored
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
Additional IP Rights Grant (Patents)
|
||||||
|
|
||||||
|
"This implementation" means the copyrightable works distributed by
|
||||||
|
Google as part of the Go project.
|
||||||
|
|
||||||
|
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||||
|
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||||
|
patent license to make, have made, use, offer to sell, sell, import,
|
||||||
|
transfer and otherwise run, modify and propagate the contents of this
|
||||||
|
implementation of Go, where such license applies only to those patent
|
||||||
|
claims, both currently owned or controlled by Google and acquired in
|
||||||
|
the future, licensable by Google that are necessarily infringed by this
|
||||||
|
implementation of Go. This grant does not include claims that would be
|
||||||
|
infringed only as a consequence of further modification of this
|
||||||
|
implementation. If you or your agent or exclusive licensee institute or
|
||||||
|
order or agree to the institution of patent litigation against any
|
||||||
|
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||||
|
that this implementation of Go or any code incorporated within this
|
||||||
|
implementation of Go constitutes direct or contributory patent
|
||||||
|
infringement, or inducement of patent infringement, then any patent
|
||||||
|
rights granted to you under this License for this implementation of Go
|
||||||
|
shall terminate as of the date such litigation is filed.
|
3
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/README
generated
vendored
Normal file
3
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/README
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
This repository holds supplementary Go cryptography libraries.
|
||||||
|
|
||||||
|
To submit changes to this repository, see http://golang.org/doc/contribute.html.
|
526
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/cast5/cast5.go
generated
vendored
Normal file
526
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/cast5/cast5.go
generated
vendored
Normal file
|
@ -0,0 +1,526 @@
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common
|
||||||
|
// OpenPGP cipher.
|
||||||
|
package cast5 // import "golang.org/x/crypto/cast5"
|
||||||
|
|
||||||
|
import "errors"
|
||||||
|
|
||||||
|
// BlockSize is the CAST5 block size in bytes.
const BlockSize = 8

// KeySize is the key length in bytes accepted by NewCipher.
const KeySize = 16
|
||||||
|
|
||||||
|
// Cipher holds the expanded key material for one CAST5 key, as produced by
// keySchedule: sixteen per-round masking words and sixteen per-round rotate
// amounts (only the low five bits of each rotate are used).
type Cipher struct {
	masking [16]uint32
	rotate  [16]uint8
}
|
||||||
|
|
||||||
|
func NewCipher(key []byte) (c *Cipher, err error) {
|
||||||
|
if len(key) != KeySize {
|
||||||
|
return nil, errors.New("CAST5: keys must be 16 bytes")
|
||||||
|
}
|
||||||
|
|
||||||
|
c = new(Cipher)
|
||||||
|
c.keySchedule(key)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlockSize returns the CAST5 block size in bytes (8).
func (c *Cipher) BlockSize() int {
	return BlockSize
}
|
||||||
|
|
||||||
|
// Encrypt encrypts the single 8-byte block src and writes the result to dst.
// src is fully read before dst is written.
func (c *Cipher) Encrypt(dst, src []byte) {
	// Split the plaintext block into two big-endian 32-bit halves.
	l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
	r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])

	// Sixteen Feistel rounds, cycling the round functions f1, f2, f3 and
	// consuming the per-round masking/rotate values from the key schedule.
	l, r = r, l^f1(r, c.masking[0], c.rotate[0])
	l, r = r, l^f2(r, c.masking[1], c.rotate[1])
	l, r = r, l^f3(r, c.masking[2], c.rotate[2])
	l, r = r, l^f1(r, c.masking[3], c.rotate[3])

	l, r = r, l^f2(r, c.masking[4], c.rotate[4])
	l, r = r, l^f3(r, c.masking[5], c.rotate[5])
	l, r = r, l^f1(r, c.masking[6], c.rotate[6])
	l, r = r, l^f2(r, c.masking[7], c.rotate[7])

	l, r = r, l^f3(r, c.masking[8], c.rotate[8])
	l, r = r, l^f1(r, c.masking[9], c.rotate[9])
	l, r = r, l^f2(r, c.masking[10], c.rotate[10])
	l, r = r, l^f3(r, c.masking[11], c.rotate[11])

	l, r = r, l^f1(r, c.masking[12], c.rotate[12])
	l, r = r, l^f2(r, c.masking[13], c.rotate[13])
	l, r = r, l^f3(r, c.masking[14], c.rotate[14])
	l, r = r, l^f1(r, c.masking[15], c.rotate[15])

	// Emit the two halves big-endian, swapped (r first, then l).
	dst[0] = uint8(r >> 24)
	dst[1] = uint8(r >> 16)
	dst[2] = uint8(r >> 8)
	dst[3] = uint8(r)
	dst[4] = uint8(l >> 24)
	dst[5] = uint8(l >> 16)
	dst[6] = uint8(l >> 8)
	dst[7] = uint8(l)
}
|
||||||
|
|
||||||
|
// Decrypt decrypts the single 8-byte block src and writes the result to dst.
// It applies the same sixteen rounds as Encrypt in reverse order.
func (c *Cipher) Decrypt(dst, src []byte) {
	// Split the ciphertext block into two big-endian 32-bit halves.
	l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
	r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])

	// Rounds 16 down to 1, mirroring Encrypt exactly.
	l, r = r, l^f1(r, c.masking[15], c.rotate[15])
	l, r = r, l^f3(r, c.masking[14], c.rotate[14])
	l, r = r, l^f2(r, c.masking[13], c.rotate[13])
	l, r = r, l^f1(r, c.masking[12], c.rotate[12])

	l, r = r, l^f3(r, c.masking[11], c.rotate[11])
	l, r = r, l^f2(r, c.masking[10], c.rotate[10])
	l, r = r, l^f1(r, c.masking[9], c.rotate[9])
	l, r = r, l^f3(r, c.masking[8], c.rotate[8])

	l, r = r, l^f2(r, c.masking[7], c.rotate[7])
	l, r = r, l^f1(r, c.masking[6], c.rotate[6])
	l, r = r, l^f3(r, c.masking[5], c.rotate[5])
	l, r = r, l^f2(r, c.masking[4], c.rotate[4])

	l, r = r, l^f1(r, c.masking[3], c.rotate[3])
	l, r = r, l^f3(r, c.masking[2], c.rotate[2])
	l, r = r, l^f2(r, c.masking[1], c.rotate[1])
	l, r = r, l^f1(r, c.masking[0], c.rotate[0])

	// Emit the two halves big-endian, swapped (r first, then l).
	dst[0] = uint8(r >> 24)
	dst[1] = uint8(r >> 16)
	dst[2] = uint8(r >> 8)
	dst[3] = uint8(r)
	dst[4] = uint8(l >> 24)
	dst[5] = uint8(l >> 16)
	dst[6] = uint8(l >> 8)
	dst[7] = uint8(l)
}
|
||||||
|
|
||||||
|
// keyScheduleA holds four key-schedule lines of the form explained below:
// a destination word index, a source word index, and five S-box byte indexes.
type keyScheduleA [4][7]uint8

// keyScheduleB holds four lines of five S-box byte indexes, each producing
// one key word (see below).
type keyScheduleB [4][5]uint8

// keyScheduleRound contains the magic values for a round of the key schedule.
// The keyScheduleA deals with the lines like:
// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8]
// Conceptually, both x and z are in the same array, x first. The first
// element describes which word of this array gets written to and the
// second, which word gets read. So, for the line above, it's "4, 0", because
// it's writing to the first word of z, which, being after x, is word 4, and
// reading from the first word of x: word 0.
//
// Next are the indexes into the S-boxes. Now the array is treated as bytes. So
// "xD" is 0xd. The first byte of z is written as "16 + 0", just to be clear
// that it's z that we're indexing.
//
// keyScheduleB deals with lines like:
// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2]
// "K1" is ignored because key words are always written in order. So the five
// elements are the S-box indexes. They use the same form as in keyScheduleA,
// above.

// NOTE(review): keyScheduleRound and keySchedule appear unused in this file;
// the comment above documents the schedule table below instead.
type keyScheduleRound struct{}
type keySchedule []keyScheduleRound
|
||||||
|
|
||||||
|
// schedule holds the four rounds of key-schedule tables described in the
// comment above; keySchedule walks it twice (once per half of the schedule).
var schedule = []struct {
	a keyScheduleA
	b keyScheduleB
}{
	{
		keyScheduleA{
			{4, 0, 0xd, 0xf, 0xc, 0xe, 0x8},
			{5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
			{6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
			{7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
		},
		keyScheduleB{
			{16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2},
			{16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6},
			{16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9},
			{16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc},
		},
	},
	{
		keyScheduleA{
			{0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
			{1, 4, 0, 2, 1, 3, 16 + 2},
			{2, 5, 7, 6, 5, 4, 16 + 1},
			{3, 7, 0xa, 9, 0xb, 8, 16 + 3},
		},
		keyScheduleB{
			{3, 2, 0xc, 0xd, 8},
			{1, 0, 0xe, 0xf, 0xd},
			{7, 6, 8, 9, 3},
			{5, 4, 0xa, 0xb, 7},
		},
	},
	{
		keyScheduleA{
			{4, 0, 0xd, 0xf, 0xc, 0xe, 8},
			{5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
			{6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
			{7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
		},
		keyScheduleB{
			{16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9},
			{16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc},
			{16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2},
			{16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6},
		},
	},
	{
		keyScheduleA{
			{0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
			{1, 4, 0, 2, 1, 3, 16 + 2},
			{2, 5, 7, 6, 5, 4, 16 + 1},
			{3, 7, 0xa, 9, 0xb, 8, 16 + 3},
		},
		keyScheduleB{
			{8, 9, 7, 6, 3},
			{0xa, 0xb, 5, 4, 7},
			{0xc, 0xd, 3, 2, 8},
			{0xe, 0xf, 1, 0, 0xd},
		},
	},
}
|
||||||
|
|
||||||
|
// keySchedule expands the 16-byte key in into the per-round masking words
// and rotate amounts (c.masking, c.rotate), driven by the schedule tables
// above. See RFC 2144.
func (c *Cipher) keySchedule(in []byte) {
	var t [8]uint32 // working words: x (t[0:4]) then z (t[4:8]), per the comment above schedule
	var k [32]uint32 // raw key words: first 16 become masks, last 16 rotates

	// Load the key as four big-endian 32-bit words.
	for i := 0; i < 4; i++ {
		j := i * 4
		t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3])
	}

	// S-box selector for the final XOR of each keyScheduleA line.
	x := []byte{6, 7, 4, 5}
	ki := 0

	for half := 0; half < 2; half++ {
		for _, round := range schedule {
			// Mix the working words according to the round's A table.
			// In each index, n>>2 selects the 32-bit word and n&3 the
			// byte within it (big-endian).
			for j := 0; j < 4; j++ {
				var a [7]uint8
				copy(a[:], round.a[j][:])
				w := t[a[1]]
				w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff]
				w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff]
				w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff]
				w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff]
				w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff]
				t[a[0]] = w
			}

			// Emit four key words according to the round's B table.
			for j := 0; j < 4; j++ {
				var b [5]uint8
				copy(b[:], round.b[j][:])
				w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff]
				w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff]
				w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff]
				w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff]
				w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff]
				k[ki] = w
				ki++
			}
		}
	}

	// The first 16 key words are the masking keys; the next 16 supply the
	// rotate amounts (only the low 5 bits matter for a 32-bit rotate).
	for i := 0; i < 16; i++ {
		c.masking[i] = k[i]
		c.rotate[i] = uint8(k[16+i] & 0x1f)
	}
}
|
||||||
|
|
||||||
|
// These are the three 'f' functions. See RFC 2144, section 2.2.
|
||||||
|
func f1(d, m uint32, r uint8) uint32 {
|
||||||
|
t := m + d
|
||||||
|
I := (t << r) | (t >> (32 - r))
|
||||||
|
return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff]
|
||||||
|
}
|
||||||
|
|
||||||
|
func f2(d, m uint32, r uint8) uint32 {
|
||||||
|
t := m ^ d
|
||||||
|
I := (t << r) | (t >> (32 - r))
|
||||||
|
return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff]
|
||||||
|
}
|
||||||
|
|
||||||
|
func f3(d, m uint32, r uint8) uint32 {
|
||||||
|
t := m - d
|
||||||
|
I := (t << r) | (t >> (32 - r))
|
||||||
|
return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff]
|
||||||
|
}
|
||||||
|
|
||||||
|
// sBox holds the eight fixed CAST5 substitution boxes (S1..S8 from RFC
// 2144, Appendix A). Each box maps one byte to a 32-bit word. Boxes 0-3
// are used by the round functions f1-f3; boxes 4-7 are used only by the
// key schedule. The values are constants from the specification and
// must not be altered.
var sBox = [8][256]uint32{
	{
		0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949,
		0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e,
		0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d,
		0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0,
		0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7,
		0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935,
		0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d,
		0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50,
		0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe,
		0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3,
		0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167,
		0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291,
		0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779,
		0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2,
		0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511,
		0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d,
		0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5,
		0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324,
		0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c,
		0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc,
		0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d,
		0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96,
		0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a,
		0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d,
		0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd,
		0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6,
		0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9,
		0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872,
		0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c,
		0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e,
		0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9,
		0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf,
	},
	{
		0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651,
		0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3,
		0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb,
		0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806,
		0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b,
		0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359,
		0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b,
		0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c,
		0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34,
		0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb,
		0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd,
		0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860,
		0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b,
		0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304,
		0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b,
		0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf,
		0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c,
		0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13,
		0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f,
		0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6,
		0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6,
		0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58,
		0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906,
		0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d,
		0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6,
		0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4,
		0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6,
		0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f,
		0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249,
		0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa,
		0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9,
		0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1,
	},
	{
		0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90,
		0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5,
		0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e,
		0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240,
		0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5,
		0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b,
		0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71,
		0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04,
		0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82,
		0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15,
		0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2,
		0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176,
		0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148,
		0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc,
		0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341,
		0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e,
		0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51,
		0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f,
		0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a,
		0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b,
		0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b,
		0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5,
		0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45,
		0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536,
		0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc,
		0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0,
		0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69,
		0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2,
		0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49,
		0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d,
		0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a,
		0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783,
	},
	{
		0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1,
		0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf,
		0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15,
		0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121,
		0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25,
		0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5,
		0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb,
		0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5,
		0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d,
		0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6,
		0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23,
		0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003,
		0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6,
		0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119,
		0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24,
		0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a,
		0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79,
		0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df,
		0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26,
		0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab,
		0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7,
		0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417,
		0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2,
		0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2,
		0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a,
		0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919,
		0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef,
		0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876,
		0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab,
		0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04,
		0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282,
		0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2,
	},
	{
		0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f,
		0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a,
		0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff,
		0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02,
		0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a,
		0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7,
		0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9,
		0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981,
		0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774,
		0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655,
		0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2,
		0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910,
		0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1,
		0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da,
		0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049,
		0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f,
		0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba,
		0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be,
		0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3,
		0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840,
		0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4,
		0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2,
		0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7,
		0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5,
		0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e,
		0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e,
		0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801,
		0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad,
		0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0,
		0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20,
		0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8,
		0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4,
	},
	{
		0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac,
		0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138,
		0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367,
		0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98,
		0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072,
		0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3,
		0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd,
		0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8,
		0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9,
		0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54,
		0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387,
		0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc,
		0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf,
		0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf,
		0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f,
		0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289,
		0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950,
		0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f,
		0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b,
		0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be,
		0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13,
		0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976,
		0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0,
		0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891,
		0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da,
		0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc,
		0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084,
		0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25,
		0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121,
		0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5,
		0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd,
		0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f,
	},
	{
		0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f,
		0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de,
		0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43,
		0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19,
		0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2,
		0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516,
		0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88,
		0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816,
		0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756,
		0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a,
		0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264,
		0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688,
		0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28,
		0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3,
		0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7,
		0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06,
		0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033,
		0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a,
		0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 0x488dcf25, 0x36c9d566,
		0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509,
		0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962,
		0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e,
		0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c,
		0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c,
		0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285,
		0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301,
		0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be,
		0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767,
		0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647,
		0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914,
		0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c,
		0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3,
	},
	{
		0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5,
		0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc,
		0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd,
		0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d,
		0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2,
		0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862,
		0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc,
		0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c,
		0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e,
		0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039,
		0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8,
		0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42,
		0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5,
		0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472,
		0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225,
		0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c,
		0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb,
		0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054,
		0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70,
		0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc,
		0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c,
		0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3,
		0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 0x2eda7fa4,
		0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101,
		0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f,
		0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e,
		0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a,
		0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c,
		0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384,
		0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c,
		0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82,
		0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e,
	},
}
|
219
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/armor/armor.go
generated
vendored
Normal file
219
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/armor/armor.go
generated
vendored
Normal file
|
@ -0,0 +1,219 @@
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is
|
||||||
|
// very similar to PEM except that it has an additional CRC checksum.
|
||||||
|
package armor // import "golang.org/x/crypto/openpgp/armor"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"encoding/base64"
|
||||||
|
"golang.org/x/crypto/openpgp/errors"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Block represents an OpenPGP armored structure.
//
// The encoded form is:
//	-----BEGIN Type-----
//	Headers
//
//	base64-encoded Bytes
//	'=' base64 encoded checksum
//	-----END Type-----
// where Headers is a possibly empty sequence of Key: Value lines.
//
// Since the armored data can be very large, this package presents a
// streaming interface.
type Block struct {
	Type    string            // The type, taken from the preamble (i.e. "PGP SIGNATURE").
	Header  map[string]string // Optional headers.
	Body    io.Reader         // A Reader from which the contents can be read
	lReader lineReader        // strips armor framing and records the expected CRC
	oReader openpgpReader     // base64-decodes lReader's output and checks the CRC
}
|
||||||
|
|
||||||
|
// ArmorCorrupt is returned when the armor framing or checksum line is
// malformed.
var ArmorCorrupt error = errors.StructuralError("armor invalid")

// CRC24 parameters from RFC 4880, section 6.1.
const crc24Init = 0xb704ce  // initial accumulator value
const crc24Poly = 0x1864cfb // generator polynomial
const crc24Mask = 0xffffff  // keeps the accumulator to 24 bits
|
||||||
|
|
||||||
|
// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1
|
||||||
|
func crc24(crc uint32, d []byte) uint32 {
|
||||||
|
for _, b := range d {
|
||||||
|
crc ^= uint32(b) << 16
|
||||||
|
for i := 0; i < 8; i++ {
|
||||||
|
crc <<= 1
|
||||||
|
if crc&0x1000000 != 0 {
|
||||||
|
crc ^= crc24Poly
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return crc
|
||||||
|
}
|
||||||
|
|
||||||
|
// Armor framing markers, matched against whole lines of input.
var armorStart = []byte("-----BEGIN ")
var armorEnd = []byte("-----END ")
var armorEndOfLine = []byte("-----")
|
||||||
|
|
||||||
|
// lineReader wraps a line based reader. It watches for the end of an armor
// block and records the expected CRC value.
type lineReader struct {
	in  *bufio.Reader
	buf []byte // leftover bytes of a line longer than the last Read's p
	eof bool   // set once the closing armor line has been consumed
	crc uint32 // expected CRC24, parsed from the '=' checksum line
}
|
||||||
|
|
||||||
|
// Read returns the armor body line by line, stripping the framing. It parses
// the "=" checksum line into l.crc, verifies the "-----END" trailer follows
// it, and then reports io.EOF.
func (l *lineReader) Read(p []byte) (n int, err error) {
	if l.eof {
		return 0, io.EOF
	}

	// Drain bytes left over from a previous call before reading a new line.
	if len(l.buf) > 0 {
		n = copy(p, l.buf)
		l.buf = l.buf[n:]
		return
	}

	line, isPrefix, err := l.in.ReadLine()
	if err != nil {
		return
	}
	if isPrefix {
		// The line exceeded bufio's buffer; valid armor lines are short,
		// so treat this as corruption.
		return 0, ArmorCorrupt
	}

	if len(line) == 5 && line[0] == '=' {
		// This is the checksum line: '=' followed by four base64 characters
		// encoding the 3-byte CRC-24.
		var expectedBytes [3]byte
		var m int
		m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:])
		if m != 3 || err != nil {
			return
		}
		l.crc = uint32(expectedBytes[0])<<16 |
			uint32(expectedBytes[1])<<8 |
			uint32(expectedBytes[2])

		// The checksum must be immediately followed by the END marker.
		line, _, err = l.in.ReadLine()
		if err != nil && err != io.EOF {
			return
		}
		if !bytes.HasPrefix(line, armorEnd) {
			return 0, ArmorCorrupt
		}

		l.eof = true
		return 0, io.EOF
	}

	if len(line) > 96 {
		// Far longer than any conforming armor line; treat as corrupt.
		return 0, ArmorCorrupt
	}

	// Hand the caller as much of the line as fits; stash the remainder
	// in l.buf for the next call.
	n = copy(p, line)
	bytesToSave := len(line) - n
	if bytesToSave > 0 {
		if cap(l.buf) < bytesToSave {
			l.buf = make([]byte, 0, bytesToSave)
		}
		l.buf = l.buf[0:bytesToSave]
		copy(l.buf, line[n:])
	}

	return
}
|
||||||
|
|
||||||
|
// openpgpReader passes Read calls to the underlying base64 decoder, but keeps
// a running CRC of the resulting data and checks the CRC against the value
// found by the lineReader at EOF.
type openpgpReader struct {
	lReader    *lineReader
	b64Reader  io.Reader
	currentCRC uint32 // running CRC-24 over all decoded bytes
}
|
||||||
|
|
||||||
|
// Read decodes base64 data and folds it into the running CRC. At EOF the
// computed CRC-24 is compared with the value parsed from the checksum line;
// a mismatch yields ArmorCorrupt.
func (r *openpgpReader) Read(p []byte) (n int, err error) {
	n, err = r.b64Reader.Read(p)
	r.currentCRC = crc24(r.currentCRC, p[:n])

	if err == io.EOF {
		if r.lReader.crc != uint32(r.currentCRC&crc24Mask) {
			return 0, ArmorCorrupt
		}
	}

	return
}
|
||||||
|
|
||||||
|
// Decode reads a PGP armored block from the given Reader. It will ignore
// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The
// given Reader is not usable after calling this function: an arbitrary amount
// of data may have been read past the end of the block.
func Decode(in io.Reader) (p *Block, err error) {
	r := bufio.NewReaderSize(in, 100)
	var line []byte
	ignoreNext := false

TryNextBlock:
	p = nil

	// Skip leading garbage until a "-----BEGIN Type-----" line is found.
	for {
		ignoreThis := ignoreNext
		line, ignoreNext, err = r.ReadLine()
		if err != nil {
			return
		}
		// ignoreNext is ReadLine's isPrefix result: an over-long line is
		// skipped in full, both this fragment and its continuation.
		if ignoreNext || ignoreThis {
			continue
		}
		line = bytes.TrimSpace(line)
		if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) {
			break
		}
	}

	p = new(Block)
	// The Type sits between "-----BEGIN " and the trailing "-----".
	p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)])
	p.Header = make(map[string]string)
	nextIsContinuation := false
	var lastKey string

	// Read "Key: Value" header lines until the blank separator line.
	for {
		isContinuation := nextIsContinuation
		line, nextIsContinuation, err = r.ReadLine()
		if err != nil {
			p = nil
			return
		}
		if isContinuation {
			// Continuation of an over-long header value from the previous read.
			p.Header[lastKey] += string(line)
			continue
		}
		line = bytes.TrimSpace(line)
		if len(line) == 0 {
			break
		}

		i := bytes.Index(line, []byte(": "))
		if i == -1 {
			// Malformed header: abandon this block and search for the next.
			goto TryNextBlock
		}
		lastKey = string(line[:i])
		p.Header[lastKey] = string(line[i+2:])
	}

	// Wire up the streaming pipeline: lineReader -> base64 decoder -> CRC check.
	p.lReader.in = r
	p.oReader.currentCRC = crc24Init
	p.oReader.lReader = &p.lReader
	p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader)
	p.Body = &p.oReader

	return
}
|
160
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/armor/encode.go
generated
vendored
Normal file
160
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/armor/encode.go
generated
vendored
Normal file
|
@ -0,0 +1,160 @@
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package armor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/base64"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Framing pieces used when producing armored output.
var armorHeaderSep = []byte(": ")
var blockEnd = []byte("\n=")
var newline = []byte("\n")
var armorEndOfLineOut = []byte("-----\n")
|
||||||
|
|
||||||
|
// writeSlices writes its arguments to the given Writer.
|
||||||
|
func writeSlices(out io.Writer, slices ...[]byte) (err error) {
|
||||||
|
for _, s := range slices {
|
||||||
|
_, err = out.Write(s)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// lineBreaker breaks data across several lines, all of the same byte length
// (except possibly the last). Lines are broken with a single '\n'.
type lineBreaker struct {
	lineLength  int
	line        []byte
	used        int
	out         io.Writer
	haveWritten bool
}

// newLineBreaker returns a lineBreaker that emits lines of lineLength bytes
// to out.
func newLineBreaker(out io.Writer, lineLength int) *lineBreaker {
	lb := &lineBreaker{
		lineLength: lineLength,
		line:       make([]byte, lineLength),
		out:        out,
	}
	return lb
}
|
||||||
|
|
||||||
|
// Write buffers b into fixed-length lines. The '\n' that precedes a line is
// emitted lazily, at the start of the write that begins the next line, so no
// trailing newline is produced. n always reports len(b) on success.
func (l *lineBreaker) Write(b []byte) (n int, err error) {
	n = len(b)

	if n == 0 {
		return
	}

	// Starting a fresh line after at least one full line: emit the deferred '\n'.
	if l.used == 0 && l.haveWritten {
		_, err = l.out.Write([]byte{'\n'})
		if err != nil {
			return
		}
	}

	if l.used+len(b) < l.lineLength {
		// Not enough to complete a line yet; just accumulate.
		l.used += copy(l.line[l.used:], b)
		return
	}

	// The buffer plus b completes at least one line: flush the buffer, top it
	// up from b, and recurse on the remainder of b.
	l.haveWritten = true
	_, err = l.out.Write(l.line[0:l.used])
	if err != nil {
		return
	}
	excess := l.lineLength - l.used
	l.used = 0

	_, err = l.out.Write(b[0:excess])
	if err != nil {
		return
	}

	_, err = l.Write(b[excess:])
	return
}
|
||||||
|
|
||||||
|
func (l *lineBreaker) Close() (err error) {
|
||||||
|
if l.used > 0 {
|
||||||
|
_, err = l.out.Write(l.line[0:l.used])
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// encoding keeps track of a running CRC24 over the data which has been written
// to it and outputs a OpenPGP checksum when closed, followed by an armor
// trailer.
//
// It's built into a stack of io.Writers:
//
//	encoding -> base64 encoder -> lineBreaker -> out
type encoding struct {
	out       io.Writer      // destination for the checksum line and trailer
	breaker   *lineBreaker   // wraps out; folds the base64 body into fixed-width lines
	b64       io.WriteCloser // base64 encoder writing into breaker
	crc       uint32         // running CRC-24 of the raw (pre-base64) data
	blockType []byte         // Type used in the "-----END Type-----" trailer
}
|
||||||
|
|
||||||
|
// Write folds data into the running CRC-24 and forwards it to the base64
// encoder.
func (e *encoding) Write(data []byte) (n int, err error) {
	e.crc = crc24(e.crc, data)
	return e.b64.Write(data)
}
|
||||||
|
|
||||||
|
func (e *encoding) Close() (err error) {
|
||||||
|
err = e.b64.Close()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
e.breaker.Close()
|
||||||
|
|
||||||
|
var checksumBytes [3]byte
|
||||||
|
checksumBytes[0] = byte(e.crc >> 16)
|
||||||
|
checksumBytes[1] = byte(e.crc >> 8)
|
||||||
|
checksumBytes[2] = byte(e.crc)
|
||||||
|
|
||||||
|
var b64ChecksumBytes [4]byte
|
||||||
|
base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:])
|
||||||
|
|
||||||
|
return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encode returns a WriteCloser which will encode the data written to it in
// OpenPGP armor. blockType becomes the Type in the BEGIN/END markers, and
// headers are emitted as "Key: Value" lines. The returned WriteCloser must be
// closed to flush the final line, checksum and trailer.
func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) {
	bType := []byte(blockType)
	err = writeSlices(out, armorStart, bType, armorEndOfLineOut)
	if err != nil {
		return
	}

	// Header lines appear in Go's (unspecified) map-iteration order.
	for k, v := range headers {
		err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline)
		if err != nil {
			return
		}
	}

	// Blank line separating the headers from the base64 body.
	_, err = out.Write(newline)
	if err != nil {
		return
	}

	// Build the writer stack: encoding -> base64 -> lineBreaker(64) -> out.
	e := &encoding{
		out:       out,
		breaker:   newLineBreaker(out, 64),
		crc:       crc24Init,
		blockType: bType,
	}
	e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker)
	return e, nil
}
|
59
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/canonical_text.go
generated
vendored
Normal file
59
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/canonical_text.go
generated
vendored
Normal file
|
@ -0,0 +1,59 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package openpgp
|
||||||
|
|
||||||
|
import "hash"
|
||||||
|
|
||||||
|
// NewCanonicalTextHash reformats text written to it into the canonical
// form and then applies the hash h. See RFC 4880, section 5.2.1.
func NewCanonicalTextHash(h hash.Hash) hash.Hash {
	return &canonicalTextHash{h, 0}
}

// canonicalTextHash rewrites line endings to CRLF on the fly before feeding
// the text to the wrapped hash.
type canonicalTextHash struct {
	h hash.Hash
	s int // 0: normal; 1: the previous byte was '\r'
}

var newline = []byte{'\r', '\n'}
|
||||||
|
|
||||||
|
// Write feeds buf to the underlying hash, rewriting bare "\n" line endings to
// "\r\n". An existing "\r\n" pair passes through unchanged: state 1 makes the
// '\n' following a '\r' part of the untouched run. State persists across
// calls, so endings split between Writes are handled correctly.
func (cth *canonicalTextHash) Write(buf []byte) (int, error) {
	start := 0

	for i, c := range buf {
		switch cth.s {
		case 0:
			if c == '\r' {
				cth.s = 1
			} else if c == '\n' {
				// Flush everything before the bare '\n', then emit CRLF.
				cth.h.Write(buf[start:i])
				cth.h.Write(newline)
				start = i + 1
			}
		case 1:
			cth.s = 0
		}
	}

	cth.h.Write(buf[start:])
	return len(buf), nil
}
|
||||||
|
|
||||||
|
// Sum appends the current digest to in, as hash.Hash.Sum.
func (cth *canonicalTextHash) Sum(in []byte) []byte {
	return cth.h.Sum(in)
}

// Reset clears the wrapped hash and the line-ending state machine.
func (cth *canonicalTextHash) Reset() {
	cth.h.Reset()
	cth.s = 0
}

// Size returns the digest size of the wrapped hash.
func (cth *canonicalTextHash) Size() int {
	return cth.h.Size()
}

// BlockSize returns the block size of the wrapped hash.
func (cth *canonicalTextHash) BlockSize() int {
	return cth.h.BlockSize()
}
|
376
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go
generated
vendored
Normal file
376
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go
generated
vendored
Normal file
|
@ -0,0 +1,376 @@
|
||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package clearsign generates and processes OpenPGP, clear-signed data. See
|
||||||
|
// RFC 4880, section 7.
|
||||||
|
//
|
||||||
|
// Clearsigned messages are cryptographically signed, but the contents of the
|
||||||
|
// message are kept in plaintext so that it can be read without special tools.
|
||||||
|
package clearsign // import "golang.org/x/crypto/openpgp/clearsign"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"crypto"
|
||||||
|
"hash"
|
||||||
|
"io"
|
||||||
|
"net/textproto"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"golang.org/x/crypto/openpgp/armor"
|
||||||
|
"golang.org/x/crypto/openpgp/errors"
|
||||||
|
"golang.org/x/crypto/openpgp/packet"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Block represents a clearsigned message. A signature on a Block can
// be checked by passing Bytes into openpgp.CheckDetachedSignature.
type Block struct {
	Headers          textproto.MIMEHeader // Optional message headers
	Plaintext        []byte               // The original message text
	Bytes            []byte               // The signed message
	ArmoredSignature *armor.Block         // The signature block
}
|
||||||
|
|
||||||
|
// start is the marker which denotes the beginning of a clearsigned message.
// The leading '\n' anchors it to a line boundary; Decode special-cases a
// match at the very beginning of the input.
var start = []byte("\n-----BEGIN PGP SIGNED MESSAGE-----")

// dashEscape is prefixed to any lines that begin with a hyphen so that they
// can't be confused with endText.
var dashEscape = []byte("- ")

// endText is a marker which denotes the end of the message and the start of
// an armored signature.
var endText = []byte("-----BEGIN PGP SIGNATURE-----")

// end is a marker which denotes the end of the armored signature.
var end = []byte("\n-----END PGP SIGNATURE-----")

var crlf = []byte("\r\n")
var lf = byte('\n')
|
||||||
|
|
||||||
|
// getLine returns the first \r\n or \n delineated line from the given byte
// array. The line does not include the \r\n or \n. The remainder of the byte
// array (also not including the new line bytes) is also returned and this will
// always be smaller than the original argument.
func getLine(data []byte) (line, rest []byte) {
	nl := bytes.IndexByte(data, '\n')
	if nl < 0 {
		// No newline at all: the whole input is the line, the rest is empty.
		return data, data[len(data):]
	}
	lineEnd := nl
	if lineEnd > 0 && data[lineEnd-1] == '\r' {
		lineEnd--
	}
	return data[:lineEnd], data[nl+1:]
}
|
||||||
|
|
||||||
|
// Decode finds the first clearsigned message in data and returns it, as well
// as the suffix of data which remains after the message. On any parse failure
// it returns nil together with the original data.
func Decode(data []byte) (b *Block, rest []byte) {
	// start begins with a newline. However, at the very beginning of
	// the byte array, we'll accept the start string without it.
	rest = data
	if bytes.HasPrefix(data, start[1:]) {
		rest = rest[len(start)-1:]
	} else if i := bytes.Index(data, start); i >= 0 {
		rest = rest[i+len(start):]
	} else {
		return nil, data
	}

	// Consume the start line.
	_, rest = getLine(rest)

	var line []byte
	b = &Block{
		Headers: make(textproto.MIMEHeader),
	}

	// Next come a series of header lines.
	for {
		// This loop terminates because getLine's second result is
		// always smaller than its argument.
		if len(rest) == 0 {
			return nil, data
		}
		// An empty line marks the end of the headers.
		if line, rest = getLine(rest); len(line) == 0 {
			break
		}

		i := bytes.Index(line, []byte{':'})
		if i == -1 {
			return nil, data
		}

		key, val := line[0:i], line[i+1:]
		key = bytes.TrimSpace(key)
		val = bytes.TrimSpace(val)
		b.Headers.Add(string(key), string(val))
	}

	// Accumulate the message body until the armored-signature marker,
	// building Bytes (CRLF-joined, for signature checking) and Plaintext
	// (LF-terminated) in parallel.
	firstLine := true
	for {
		start := rest

		line, rest = getLine(rest)
		if len(line) == 0 && len(rest) == 0 {
			// No armored data was found, so this isn't a complete message.
			return nil, data
		}
		if bytes.Equal(line, endText) {
			// Back up to the start of the line because armor expects to see the
			// header line.
			rest = start
			break
		}

		// The final CRLF isn't included in the hash so we don't write it until
		// we've seen the next line.
		if firstLine {
			firstLine = false
		} else {
			b.Bytes = append(b.Bytes, crlf...)
		}

		if bytes.HasPrefix(line, dashEscape) {
			line = line[2:]
		}
		line = bytes.TrimRight(line, " \t")
		b.Bytes = append(b.Bytes, line...)

		b.Plaintext = append(b.Plaintext, line...)
		b.Plaintext = append(b.Plaintext, lf)
	}

	// We want to find the extent of the armored data (including any newlines at
	// the end).
	i := bytes.Index(rest, end)
	if i == -1 {
		return nil, data
	}
	i += len(end)
	for i < len(rest) && (rest[i] == '\r' || rest[i] == '\n') {
		i++
	}
	armored := rest[:i]
	rest = rest[i:]

	var err error
	b.ArmoredSignature, err = armor.Decode(bytes.NewBuffer(armored))
	if err != nil {
		return nil, data
	}

	return b, rest
}
|
||||||
|
|
||||||
|
// A dashEscaper is an io.WriteCloser which processes the body of a clear-signed
// message. The clear-signed message is written to buffered and a hash, suitable
// for signing, is maintained in h.
//
// When closed, an armored signature is created and written to complete the
// message.
type dashEscaper struct {
	buffered *bufio.Writer
	h        hash.Hash // hash of the canonical (CRLF, trailing-space-stripped) text
	hashType crypto.Hash

	atBeginningOfLine bool
	isFirstLine       bool

	whitespace []byte // trailing whitespace held back until the line's fate is known
	byteBuf    []byte // a one byte buffer to save allocations

	privateKey *packet.PrivateKey
	config     *packet.Config
}
|
||||||
|
|
||||||
|
// Write dash-escapes and buffers data while maintaining in d.h the hash of
// the canonical form of the text: CRLF line endings, trailing whitespace
// removed, dash-escapes excluded. It always reports len(data) consumed on
// success.
func (d *dashEscaper) Write(data []byte) (n int, err error) {
	for _, b := range data {
		d.byteBuf[0] = b

		if d.atBeginningOfLine {
			// The final CRLF isn't included in the hash so we have to wait
			// until this point (the start of the next line) before writing it.
			if !d.isFirstLine {
				d.h.Write(crlf)
			}
			d.isFirstLine = false
		}

		// Any whitespace at the end of the line has to be removed so we
		// buffer it until we find out whether there's more on this line.
		if b == ' ' || b == '\t' || b == '\r' {
			d.whitespace = append(d.whitespace, b)
			d.atBeginningOfLine = false
			continue
		}

		if d.atBeginningOfLine {
			// At the beginning of a line, hyphens have to be escaped.
			if b == '-' {
				// The signature isn't calculated over the dash-escaped text so
				// the escape is only written to buffered.
				if _, err = d.buffered.Write(dashEscape); err != nil {
					return
				}
				d.h.Write(d.byteBuf)
				d.atBeginningOfLine = false
			} else if b == '\n' {
				// Nothing to do because we delay writing CRLF to the hash.
			} else {
				d.h.Write(d.byteBuf)
				d.atBeginningOfLine = false
			}
			if err = d.buffered.WriteByte(b); err != nil {
				return
			}
		} else {
			if b == '\n' {
				// We got a raw \n. Drop any trailing whitespace and write a
				// CRLF.
				d.whitespace = d.whitespace[:0]
				// We delay writing CRLF to the hash until the start of the
				// next line.
				if err = d.buffered.WriteByte(b); err != nil {
					return
				}
				d.atBeginningOfLine = true
			} else {
				// Any buffered whitespace wasn't at the end of the line so
				// we need to write it out.
				if len(d.whitespace) > 0 {
					d.h.Write(d.whitespace)
					if _, err = d.buffered.Write(d.whitespace); err != nil {
						return
					}
					d.whitespace = d.whitespace[:0]
				}
				d.h.Write(d.byteBuf)
				if err = d.buffered.WriteByte(b); err != nil {
					return
				}
			}
		}
	}

	n = len(data)
	return
}
|
||||||
|
|
||||||
|
// Close terminates the message body (adding a final newline if the body did
// not end with one), builds a text signature over the accumulated hash with
// d.privateKey, appends it in armored form and flushes the buffered writer.
func (d *dashEscaper) Close() (err error) {
	if !d.atBeginningOfLine {
		if err = d.buffered.WriteByte(lf); err != nil {
			return
		}
	}
	sig := new(packet.Signature)
	sig.SigType = packet.SigTypeText
	sig.PubKeyAlgo = d.privateKey.PubKeyAlgo
	sig.Hash = d.hashType
	sig.CreationTime = d.config.Now()
	sig.IssuerKeyId = &d.privateKey.KeyId

	if err = sig.Sign(d.h, d.privateKey, d.config); err != nil {
		return
	}

	// Emit the armored "PGP SIGNATURE" block right after the message body.
	out, err := armor.Encode(d.buffered, "PGP SIGNATURE", nil)
	if err != nil {
		return
	}

	if err = sig.Serialize(out); err != nil {
		return
	}
	if err = out.Close(); err != nil {
		return
	}
	if err = d.buffered.Flush(); err != nil {
		return
	}
	return
}
|
||||||
|
|
||||||
|
// Encode returns a WriteCloser which will clear-sign a message with privateKey
// and write it to w. If config is nil, sensible defaults are used. The
// returned WriteCloser must be closed to emit the trailing armored signature.
func Encode(w io.Writer, privateKey *packet.PrivateKey, config *packet.Config) (plaintext io.WriteCloser, err error) {
	if privateKey.Encrypted {
		return nil, errors.InvalidArgumentError("signing key is encrypted")
	}

	hashType := config.Hash()
	name := nameOfHash(hashType)
	if len(name) == 0 {
		return nil, errors.UnsupportedError("unknown hash type: " + strconv.Itoa(int(hashType)))
	}

	if !hashType.Available() {
		return nil, errors.UnsupportedError("unsupported hash type: " + strconv.Itoa(int(hashType)))
	}
	h := hashType.New()

	// Write the "-----BEGIN PGP SIGNED MESSAGE-----" line, the "Hash:"
	// header and the blank separator line.
	buffered := bufio.NewWriter(w)
	// start has a \n at the beginning that we don't want here.
	if _, err = buffered.Write(start[1:]); err != nil {
		return
	}
	if err = buffered.WriteByte(lf); err != nil {
		return
	}
	if _, err = buffered.WriteString("Hash: "); err != nil {
		return
	}
	if _, err = buffered.WriteString(name); err != nil {
		return
	}
	if err = buffered.WriteByte(lf); err != nil {
		return
	}
	if err = buffered.WriteByte(lf); err != nil {
		return
	}

	plaintext = &dashEscaper{
		buffered: buffered,
		h:        h,
		hashType: hashType,

		atBeginningOfLine: true,
		isFirstLine:       true,

		byteBuf: make([]byte, 1),

		privateKey: privateKey,
		config:     config,
	}

	return
}
|
||||||
|
|
||||||
|
// nameOfHash returns the OpenPGP name for the given hash, or the empty string
// if the name isn't known. See RFC 4880, section 9.4.
func nameOfHash(h crypto.Hash) string {
	var name string
	switch h {
	case crypto.MD5:
		name = "MD5"
	case crypto.SHA1:
		name = "SHA1"
	case crypto.RIPEMD160:
		name = "RIPEMD160"
	case crypto.SHA224:
		name = "SHA224"
	case crypto.SHA256:
		name = "SHA256"
	case crypto.SHA384:
		name = "SHA384"
	case crypto.SHA512:
		name = "SHA512"
	}
	return name
}
|
122
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
generated
vendored
Normal file
122
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
generated
vendored
Normal file
|
@ -0,0 +1,122 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package elgamal implements ElGamal encryption, suitable for OpenPGP,
|
||||||
|
// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on
|
||||||
|
// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31,
|
||||||
|
// n. 4, 1985, pp. 469-472.
|
||||||
|
//
|
||||||
|
// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it
|
||||||
|
// unsuitable for other protocols. RSA should be used in preference in any
|
||||||
|
// case.
|
||||||
|
package elgamal // import "golang.org/x/crypto/openpgp/elgamal"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/subtle"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"math/big"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PublicKey represents an ElGamal public key.
type PublicKey struct {
	G, P, Y *big.Int // group generator, prime modulus, and public value
}

// PrivateKey represents an ElGamal private key.
type PrivateKey struct {
	PublicKey
	X *big.Int // secret exponent
}
|
||||||
|
|
||||||
|
// Encrypt encrypts the given message to the given public key. The result is a
// pair of integers. Errors can result from reading random, or because msg is
// too large to be encrypted to the public key.
func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) {
	pLen := (pub.P.BitLen() + 7) / 8
	// The -11 bound guarantees room for the PKCS#1 v1.5 style padding
	// below: one type byte, at least eight non-zero PS octets, and a zero.
	if len(msg) > pLen-11 {
		err = errors.New("elgamal: message too long")
		return
	}

	// EM = 0x02 || PS || 0x00 || M
	em := make([]byte, pLen-1)
	em[0] = 2
	ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):]
	err = nonZeroRandomBytes(ps, random)
	if err != nil {
		return
	}
	em[len(em)-len(msg)-1] = 0
	copy(mm, msg)

	m := new(big.Int).SetBytes(em)

	// Ephemeral exponent k; c1 = G^k mod P, c2 = m * Y^k mod P.
	k, err := rand.Int(random, pub.P)
	if err != nil {
		return
	}

	c1 = new(big.Int).Exp(pub.G, k, pub.P)
	s := new(big.Int).Exp(pub.Y, k, pub.P)
	c2 = s.Mul(s, m)
	c2.Mod(c2, pub.P)

	return
}
|
||||||
|
|
||||||
|
// Decrypt takes two integers, resulting from an ElGamal encryption, and
|
||||||
|
// returns the plaintext of the message. An error can result only if the
|
||||||
|
// ciphertext is invalid. Users should keep in mind that this is a padding
|
||||||
|
// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can
|
||||||
|
// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks
|
||||||
|
// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel
|
||||||
|
// Bleichenbacher, Advances in Cryptology (Crypto '98),
|
||||||
|
func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
|
||||||
|
s := new(big.Int).Exp(c1, priv.X, priv.P)
|
||||||
|
s.ModInverse(s, priv.P)
|
||||||
|
s.Mul(s, c2)
|
||||||
|
s.Mod(s, priv.P)
|
||||||
|
em := s.Bytes()
|
||||||
|
|
||||||
|
firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2)
|
||||||
|
|
||||||
|
// The remainder of the plaintext must be a string of non-zero random
|
||||||
|
// octets, followed by a 0, followed by the message.
|
||||||
|
// lookingForIndex: 1 iff we are still looking for the zero.
|
||||||
|
// index: the offset of the first zero byte.
|
||||||
|
var lookingForIndex, index int
|
||||||
|
lookingForIndex = 1
|
||||||
|
|
||||||
|
for i := 1; i < len(em); i++ {
|
||||||
|
equals0 := subtle.ConstantTimeByteEq(em[i], 0)
|
||||||
|
index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
|
||||||
|
lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
|
||||||
|
}
|
||||||
|
|
||||||
|
if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 {
|
||||||
|
return nil, errors.New("elgamal: decryption error")
|
||||||
|
}
|
||||||
|
return em[index+1:], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// nonZeroRandomBytes fills the given slice with non-zero random octets.
func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
	_, err = io.ReadFull(rand, s)
	if err != nil {
		return
	}

	// Re-draw any zero octets one at a time until none remain.
	for i := 0; i < len(s); i++ {
		for s[i] == 0 {
			_, err = io.ReadFull(rand, s[i:i+1])
			if err != nil {
				return
			}
		}
	}

	return
}
|
72
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/errors/errors.go
generated
vendored
Normal file
72
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/errors/errors.go
generated
vendored
Normal file
|
@ -0,0 +1,72 @@
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package errors contains common error types for the OpenPGP packages.
|
||||||
|
package errors // import "golang.org/x/crypto/openpgp/errors"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A StructuralError is returned when OpenPGP data is found to be syntactically
// invalid.
type StructuralError string

// Error implements the error interface.
func (s StructuralError) Error() string {
	return "openpgp: invalid data: " + string(s)
}

// UnsupportedError indicates that, although the OpenPGP data is valid, it
// makes use of currently unimplemented features.
type UnsupportedError string

// Error implements the error interface.
func (s UnsupportedError) Error() string {
	return "openpgp: unsupported feature: " + string(s)
}
|
||||||
|
|
||||||
|
// InvalidArgumentError indicates that the caller is in error and passed an
// incorrect value.
type InvalidArgumentError string

// Error implements the error interface.
func (i InvalidArgumentError) Error() string {
	return "openpgp: invalid argument: " + string(i)
}

// SignatureError indicates that a syntactically valid signature failed to
// validate.
type SignatureError string

// Error implements the error interface.
func (b SignatureError) Error() string {
	return "openpgp: invalid signature: " + string(b)
}
|
||||||
|
|
||||||
|
type keyIncorrectError int
|
||||||
|
|
||||||
|
func (ki keyIncorrectError) Error() string {
|
||||||
|
return "openpgp: incorrect key"
|
||||||
|
}
|
||||||
|
|
||||||
|
var ErrKeyIncorrect error = keyIncorrectError(0)
|
||||||
|
|
||||||
|
type unknownIssuerError int
|
||||||
|
|
||||||
|
func (unknownIssuerError) Error() string {
|
||||||
|
return "openpgp: signature made by unknown entity"
|
||||||
|
}
|
||||||
|
|
||||||
|
var ErrUnknownIssuer error = unknownIssuerError(0)
|
||||||
|
|
||||||
|
type keyRevokedError int
|
||||||
|
|
||||||
|
func (keyRevokedError) Error() string {
|
||||||
|
return "openpgp: signature made by revoked key"
|
||||||
|
}
|
||||||
|
|
||||||
|
var ErrKeyRevoked error = keyRevokedError(0)
|
||||||
|
|
||||||
|
type UnknownPacketTypeError uint8
|
||||||
|
|
||||||
|
func (upte UnknownPacketTypeError) Error() string {
|
||||||
|
return "openpgp: unknown packet type: " + strconv.Itoa(int(upte))
|
||||||
|
}
|
637
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/keys.go
generated
vendored
Normal file
637
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/keys.go
generated
vendored
Normal file
|
@ -0,0 +1,637 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package openpgp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rsa"
|
||||||
|
"io"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/crypto/openpgp/armor"
|
||||||
|
"golang.org/x/crypto/openpgp/errors"
|
||||||
|
"golang.org/x/crypto/openpgp/packet"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PublicKeyType is the armor type for a PGP public key.
|
||||||
|
var PublicKeyType = "PGP PUBLIC KEY BLOCK"
|
||||||
|
|
||||||
|
// PrivateKeyType is the armor type for a PGP private key.
|
||||||
|
var PrivateKeyType = "PGP PRIVATE KEY BLOCK"
|
||||||
|
|
||||||
|
// An Entity represents the components of an OpenPGP key: a primary public key
|
||||||
|
// (which must be a signing key), one or more identities claimed by that key,
|
||||||
|
// and zero or more subkeys, which may be encryption keys.
|
||||||
|
type Entity struct {
|
||||||
|
PrimaryKey *packet.PublicKey
|
||||||
|
PrivateKey *packet.PrivateKey
|
||||||
|
Identities map[string]*Identity // indexed by Identity.Name
|
||||||
|
Revocations []*packet.Signature
|
||||||
|
Subkeys []Subkey
|
||||||
|
}
|
||||||
|
|
||||||
|
// An Identity represents an identity claimed by an Entity and zero or more
|
||||||
|
// assertions by other entities about that claim.
|
||||||
|
type Identity struct {
|
||||||
|
Name string // by convention, has the form "Full Name (comment) <email@example.com>"
|
||||||
|
UserId *packet.UserId
|
||||||
|
SelfSignature *packet.Signature
|
||||||
|
Signatures []*packet.Signature
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Subkey is an additional public key in an Entity. Subkeys can be used for
|
||||||
|
// encryption.
|
||||||
|
type Subkey struct {
|
||||||
|
PublicKey *packet.PublicKey
|
||||||
|
PrivateKey *packet.PrivateKey
|
||||||
|
Sig *packet.Signature
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Key identifies a specific public key in an Entity. This is either the
|
||||||
|
// Entity's primary key or a subkey.
|
||||||
|
type Key struct {
|
||||||
|
Entity *Entity
|
||||||
|
PublicKey *packet.PublicKey
|
||||||
|
PrivateKey *packet.PrivateKey
|
||||||
|
SelfSignature *packet.Signature
|
||||||
|
}
|
||||||
|
|
||||||
|
// A KeyRing provides access to public and private keys.
|
||||||
|
type KeyRing interface {
|
||||||
|
// KeysById returns the set of keys that have the given key id.
|
||||||
|
KeysById(id uint64) []Key
|
||||||
|
// KeysByIdAndUsage returns the set of keys with the given id
|
||||||
|
// that also meet the key usage given by requiredUsage.
|
||||||
|
// The requiredUsage is expressed as the bitwise-OR of
|
||||||
|
// packet.KeyFlag* values.
|
||||||
|
KeysByIdUsage(id uint64, requiredUsage byte) []Key
|
||||||
|
// DecryptionKeys returns all private keys that are valid for
|
||||||
|
// decryption.
|
||||||
|
DecryptionKeys() []Key
|
||||||
|
}
|
||||||
|
|
||||||
|
// primaryIdentity returns the Identity marked as primary or the first identity
|
||||||
|
// if none are so marked.
|
||||||
|
func (e *Entity) primaryIdentity() *Identity {
|
||||||
|
var firstIdentity *Identity
|
||||||
|
for _, ident := range e.Identities {
|
||||||
|
if firstIdentity == nil {
|
||||||
|
firstIdentity = ident
|
||||||
|
}
|
||||||
|
if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
|
||||||
|
return ident
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return firstIdentity
|
||||||
|
}
|
||||||
|
|
||||||
|
// encryptionKey returns the best candidate Key for encrypting a message to the
|
||||||
|
// given Entity.
|
||||||
|
func (e *Entity) encryptionKey(now time.Time) (Key, bool) {
|
||||||
|
candidateSubkey := -1
|
||||||
|
|
||||||
|
// Iterate the keys to find the newest key
|
||||||
|
var maxTime time.Time
|
||||||
|
for i, subkey := range e.Subkeys {
|
||||||
|
if subkey.Sig.FlagsValid &&
|
||||||
|
subkey.Sig.FlagEncryptCommunications &&
|
||||||
|
subkey.PublicKey.PubKeyAlgo.CanEncrypt() &&
|
||||||
|
!subkey.Sig.KeyExpired(now) &&
|
||||||
|
(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
|
||||||
|
candidateSubkey = i
|
||||||
|
maxTime = subkey.Sig.CreationTime
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if candidateSubkey != -1 {
|
||||||
|
subkey := e.Subkeys[candidateSubkey]
|
||||||
|
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we don't have any candidate subkeys for encryption and
|
||||||
|
// the primary key doesn't have any usage metadata then we
|
||||||
|
// assume that the primary key is ok. Or, if the primary key is
|
||||||
|
// marked as ok to encrypt to, then we can obviously use it.
|
||||||
|
i := e.primaryIdentity()
|
||||||
|
if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications &&
|
||||||
|
e.PrimaryKey.PubKeyAlgo.CanEncrypt() &&
|
||||||
|
!i.SelfSignature.KeyExpired(now) {
|
||||||
|
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// This Entity appears to be signing only.
|
||||||
|
return Key{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// signingKey return the best candidate Key for signing a message with this
|
||||||
|
// Entity.
|
||||||
|
func (e *Entity) signingKey(now time.Time) (Key, bool) {
|
||||||
|
candidateSubkey := -1
|
||||||
|
|
||||||
|
for i, subkey := range e.Subkeys {
|
||||||
|
if subkey.Sig.FlagsValid &&
|
||||||
|
subkey.Sig.FlagSign &&
|
||||||
|
subkey.PublicKey.PubKeyAlgo.CanSign() &&
|
||||||
|
!subkey.Sig.KeyExpired(now) {
|
||||||
|
candidateSubkey = i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if candidateSubkey != -1 {
|
||||||
|
subkey := e.Subkeys[candidateSubkey]
|
||||||
|
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we have no candidate subkey then we assume that it's ok to sign
|
||||||
|
// with the primary key.
|
||||||
|
i := e.primaryIdentity()
|
||||||
|
if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign &&
|
||||||
|
!i.SelfSignature.KeyExpired(now) {
|
||||||
|
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
|
||||||
|
}
|
||||||
|
|
||||||
|
return Key{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// An EntityList contains one or more Entities.
|
||||||
|
type EntityList []*Entity
|
||||||
|
|
||||||
|
// KeysById returns the set of keys that have the given key id.
|
||||||
|
func (el EntityList) KeysById(id uint64) (keys []Key) {
|
||||||
|
for _, e := range el {
|
||||||
|
if e.PrimaryKey.KeyId == id {
|
||||||
|
var selfSig *packet.Signature
|
||||||
|
for _, ident := range e.Identities {
|
||||||
|
if selfSig == nil {
|
||||||
|
selfSig = ident.SelfSignature
|
||||||
|
} else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
|
||||||
|
selfSig = ident.SelfSignature
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig})
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, subKey := range e.Subkeys {
|
||||||
|
if subKey.PublicKey.KeyId == id {
|
||||||
|
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeysByIdAndUsage returns the set of keys with the given id that also meet
|
||||||
|
// the key usage given by requiredUsage. The requiredUsage is expressed as
|
||||||
|
// the bitwise-OR of packet.KeyFlag* values.
|
||||||
|
func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) {
|
||||||
|
for _, key := range el.KeysById(id) {
|
||||||
|
if len(key.Entity.Revocations) > 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if key.SelfSignature.RevocationReason != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if key.SelfSignature.FlagsValid && requiredUsage != 0 {
|
||||||
|
var usage byte
|
||||||
|
if key.SelfSignature.FlagCertify {
|
||||||
|
usage |= packet.KeyFlagCertify
|
||||||
|
}
|
||||||
|
if key.SelfSignature.FlagSign {
|
||||||
|
usage |= packet.KeyFlagSign
|
||||||
|
}
|
||||||
|
if key.SelfSignature.FlagEncryptCommunications {
|
||||||
|
usage |= packet.KeyFlagEncryptCommunications
|
||||||
|
}
|
||||||
|
if key.SelfSignature.FlagEncryptStorage {
|
||||||
|
usage |= packet.KeyFlagEncryptStorage
|
||||||
|
}
|
||||||
|
if usage&requiredUsage != requiredUsage {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
keys = append(keys, key)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecryptionKeys returns all private keys that are valid for decryption.
|
||||||
|
func (el EntityList) DecryptionKeys() (keys []Key) {
|
||||||
|
for _, e := range el {
|
||||||
|
for _, subKey := range e.Subkeys {
|
||||||
|
if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) {
|
||||||
|
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file.
|
||||||
|
func ReadArmoredKeyRing(r io.Reader) (EntityList, error) {
|
||||||
|
block, err := armor.Decode(r)
|
||||||
|
if err == io.EOF {
|
||||||
|
return nil, errors.InvalidArgumentError("no armored data found")
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if block.Type != PublicKeyType && block.Type != PrivateKeyType {
|
||||||
|
return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ReadKeyRing(block.Body)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadKeyRing reads one or more public/private keys. Unsupported keys are
|
||||||
|
// ignored as long as at least a single valid key is found.
|
||||||
|
func ReadKeyRing(r io.Reader) (el EntityList, err error) {
|
||||||
|
packets := packet.NewReader(r)
|
||||||
|
var lastUnsupportedError error
|
||||||
|
|
||||||
|
for {
|
||||||
|
var e *Entity
|
||||||
|
e, err = ReadEntity(packets)
|
||||||
|
if err != nil {
|
||||||
|
// TODO: warn about skipped unsupported/unreadable keys
|
||||||
|
if _, ok := err.(errors.UnsupportedError); ok {
|
||||||
|
lastUnsupportedError = err
|
||||||
|
err = readToNextPublicKey(packets)
|
||||||
|
} else if _, ok := err.(errors.StructuralError); ok {
|
||||||
|
// Skip unreadable, badly-formatted keys
|
||||||
|
lastUnsupportedError = err
|
||||||
|
err = readToNextPublicKey(packets)
|
||||||
|
}
|
||||||
|
if err == io.EOF {
|
||||||
|
err = nil
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
el = nil
|
||||||
|
break
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
el = append(el, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(el) == 0 && err == nil {
|
||||||
|
err = lastUnsupportedError
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// readToNextPublicKey reads packets until the start of the entity and leaves
|
||||||
|
// the first packet of the new entity in the Reader.
|
||||||
|
func readToNextPublicKey(packets *packet.Reader) (err error) {
|
||||||
|
var p packet.Packet
|
||||||
|
for {
|
||||||
|
p, err = packets.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
return
|
||||||
|
} else if err != nil {
|
||||||
|
if _, ok := err.(errors.UnsupportedError); ok {
|
||||||
|
err = nil
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey {
|
||||||
|
packets.Unread(p)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadEntity reads an entity (public key, identities, subkeys etc) from the
|
||||||
|
// given Reader.
|
||||||
|
func ReadEntity(packets *packet.Reader) (*Entity, error) {
|
||||||
|
e := new(Entity)
|
||||||
|
e.Identities = make(map[string]*Identity)
|
||||||
|
|
||||||
|
p, err := packets.Next()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var ok bool
|
||||||
|
if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok {
|
||||||
|
if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok {
|
||||||
|
packets.Unread(p)
|
||||||
|
return nil, errors.StructuralError("first packet was not a public/private key")
|
||||||
|
} else {
|
||||||
|
e.PrimaryKey = &e.PrivateKey.PublicKey
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !e.PrimaryKey.PubKeyAlgo.CanSign() {
|
||||||
|
return nil, errors.StructuralError("primary key cannot be used for signatures")
|
||||||
|
}
|
||||||
|
|
||||||
|
var current *Identity
|
||||||
|
var revocations []*packet.Signature
|
||||||
|
EachPacket:
|
||||||
|
for {
|
||||||
|
p, err := packets.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch pkt := p.(type) {
|
||||||
|
case *packet.UserId:
|
||||||
|
current = new(Identity)
|
||||||
|
current.Name = pkt.Id
|
||||||
|
current.UserId = pkt
|
||||||
|
e.Identities[pkt.Id] = current
|
||||||
|
|
||||||
|
for {
|
||||||
|
p, err = packets.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
return nil, io.ErrUnexpectedEOF
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
sig, ok := p.(*packet.Signature)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.StructuralError("user ID packet not followed by self-signature")
|
||||||
|
}
|
||||||
|
|
||||||
|
if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId {
|
||||||
|
if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
|
||||||
|
return nil, errors.StructuralError("user ID self-signature invalid: " + err.Error())
|
||||||
|
}
|
||||||
|
current.SelfSignature = sig
|
||||||
|
break
|
||||||
|
}
|
||||||
|
current.Signatures = append(current.Signatures, sig)
|
||||||
|
}
|
||||||
|
case *packet.Signature:
|
||||||
|
if pkt.SigType == packet.SigTypeKeyRevocation {
|
||||||
|
revocations = append(revocations, pkt)
|
||||||
|
} else if pkt.SigType == packet.SigTypeDirectSignature {
|
||||||
|
// TODO: RFC4880 5.2.1 permits signatures
|
||||||
|
// directly on keys (eg. to bind additional
|
||||||
|
// revocation keys).
|
||||||
|
} else if current == nil {
|
||||||
|
return nil, errors.StructuralError("signature packet found before user id packet")
|
||||||
|
} else {
|
||||||
|
current.Signatures = append(current.Signatures, pkt)
|
||||||
|
}
|
||||||
|
case *packet.PrivateKey:
|
||||||
|
if pkt.IsSubkey == false {
|
||||||
|
packets.Unread(p)
|
||||||
|
break EachPacket
|
||||||
|
}
|
||||||
|
err = addSubkey(e, packets, &pkt.PublicKey, pkt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
case *packet.PublicKey:
|
||||||
|
if pkt.IsSubkey == false {
|
||||||
|
packets.Unread(p)
|
||||||
|
break EachPacket
|
||||||
|
}
|
||||||
|
err = addSubkey(e, packets, pkt, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
// we ignore unknown packets
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(e.Identities) == 0 {
|
||||||
|
return nil, errors.StructuralError("entity without any identities")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, revocation := range revocations {
|
||||||
|
err = e.PrimaryKey.VerifyRevocationSignature(revocation)
|
||||||
|
if err == nil {
|
||||||
|
e.Revocations = append(e.Revocations, revocation)
|
||||||
|
} else {
|
||||||
|
// TODO: RFC 4880 5.2.3.15 defines revocation keys.
|
||||||
|
return nil, errors.StructuralError("revocation signature signed by alternate key")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return e, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error {
|
||||||
|
var subKey Subkey
|
||||||
|
subKey.PublicKey = pub
|
||||||
|
subKey.PrivateKey = priv
|
||||||
|
p, err := packets.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return errors.StructuralError("subkey signature invalid: " + err.Error())
|
||||||
|
}
|
||||||
|
var ok bool
|
||||||
|
subKey.Sig, ok = p.(*packet.Signature)
|
||||||
|
if !ok {
|
||||||
|
return errors.StructuralError("subkey packet not followed by signature")
|
||||||
|
}
|
||||||
|
if subKey.Sig.SigType != packet.SigTypeSubkeyBinding && subKey.Sig.SigType != packet.SigTypeSubkeyRevocation {
|
||||||
|
return errors.StructuralError("subkey signature with wrong type")
|
||||||
|
}
|
||||||
|
err = e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, subKey.Sig)
|
||||||
|
if err != nil {
|
||||||
|
return errors.StructuralError("subkey signature invalid: " + err.Error())
|
||||||
|
}
|
||||||
|
e.Subkeys = append(e.Subkeys, subKey)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
const defaultRSAKeyBits = 2048
|
||||||
|
|
||||||
|
// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a
|
||||||
|
// single identity composed of the given full name, comment and email, any of
|
||||||
|
// which may be empty but must not contain any of "()<>\x00".
|
||||||
|
// If config is nil, sensible defaults will be used.
|
||||||
|
func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) {
|
||||||
|
currentTime := config.Now()
|
||||||
|
|
||||||
|
bits := defaultRSAKeyBits
|
||||||
|
if config != nil && config.RSABits != 0 {
|
||||||
|
bits = config.RSABits
|
||||||
|
}
|
||||||
|
|
||||||
|
uid := packet.NewUserId(name, comment, email)
|
||||||
|
if uid == nil {
|
||||||
|
return nil, errors.InvalidArgumentError("user id field contained invalid characters")
|
||||||
|
}
|
||||||
|
signingPriv, err := rsa.GenerateKey(config.Random(), bits)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
encryptingPriv, err := rsa.GenerateKey(config.Random(), bits)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
e := &Entity{
|
||||||
|
PrimaryKey: packet.NewRSAPublicKey(currentTime, &signingPriv.PublicKey),
|
||||||
|
PrivateKey: packet.NewRSAPrivateKey(currentTime, signingPriv),
|
||||||
|
Identities: make(map[string]*Identity),
|
||||||
|
}
|
||||||
|
isPrimaryId := true
|
||||||
|
e.Identities[uid.Id] = &Identity{
|
||||||
|
Name: uid.Name,
|
||||||
|
UserId: uid,
|
||||||
|
SelfSignature: &packet.Signature{
|
||||||
|
CreationTime: currentTime,
|
||||||
|
SigType: packet.SigTypePositiveCert,
|
||||||
|
PubKeyAlgo: packet.PubKeyAlgoRSA,
|
||||||
|
Hash: config.Hash(),
|
||||||
|
IsPrimaryId: &isPrimaryId,
|
||||||
|
FlagsValid: true,
|
||||||
|
FlagSign: true,
|
||||||
|
FlagCertify: true,
|
||||||
|
IssuerKeyId: &e.PrimaryKey.KeyId,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the user passes in a DefaultHash via packet.Config,
|
||||||
|
// set the PreferredHash for the SelfSignature.
|
||||||
|
if config != nil && config.DefaultHash != 0 {
|
||||||
|
e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)}
|
||||||
|
}
|
||||||
|
|
||||||
|
e.Subkeys = make([]Subkey, 1)
|
||||||
|
e.Subkeys[0] = Subkey{
|
||||||
|
PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey),
|
||||||
|
PrivateKey: packet.NewRSAPrivateKey(currentTime, encryptingPriv),
|
||||||
|
Sig: &packet.Signature{
|
||||||
|
CreationTime: currentTime,
|
||||||
|
SigType: packet.SigTypeSubkeyBinding,
|
||||||
|
PubKeyAlgo: packet.PubKeyAlgoRSA,
|
||||||
|
Hash: config.Hash(),
|
||||||
|
FlagsValid: true,
|
||||||
|
FlagEncryptStorage: true,
|
||||||
|
FlagEncryptCommunications: true,
|
||||||
|
IssuerKeyId: &e.PrimaryKey.KeyId,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
e.Subkeys[0].PublicKey.IsSubkey = true
|
||||||
|
e.Subkeys[0].PrivateKey.IsSubkey = true
|
||||||
|
|
||||||
|
return e, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SerializePrivate serializes an Entity, including private key material, to
|
||||||
|
// the given Writer. For now, it must only be used on an Entity returned from
|
||||||
|
// NewEntity.
|
||||||
|
// If config is nil, sensible defaults will be used.
|
||||||
|
func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) {
|
||||||
|
err = e.PrivateKey.Serialize(w)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, ident := range e.Identities {
|
||||||
|
err = ident.UserId.Serialize(w)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = ident.SelfSignature.Serialize(w)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, subkey := range e.Subkeys {
|
||||||
|
err = subkey.PrivateKey.Serialize(w)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = subkey.Sig.Serialize(w)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serialize writes the public part of the given Entity to w. (No private
|
||||||
|
// key material will be output).
|
||||||
|
func (e *Entity) Serialize(w io.Writer) error {
|
||||||
|
err := e.PrimaryKey.Serialize(w)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, ident := range e.Identities {
|
||||||
|
err = ident.UserId.Serialize(w)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = ident.SelfSignature.Serialize(w)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, sig := range ident.Signatures {
|
||||||
|
err = sig.Serialize(w)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, subkey := range e.Subkeys {
|
||||||
|
err = subkey.PublicKey.Serialize(w)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = subkey.Sig.Serialize(w)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SignIdentity adds a signature to e, from signer, attesting that identity is
|
||||||
|
// associated with e. The provided identity must already be an element of
|
||||||
|
// e.Identities and the private key of signer must have been decrypted if
|
||||||
|
// necessary.
|
||||||
|
// If config is nil, sensible defaults will be used.
|
||||||
|
func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error {
|
||||||
|
if signer.PrivateKey == nil {
|
||||||
|
return errors.InvalidArgumentError("signing Entity must have a private key")
|
||||||
|
}
|
||||||
|
if signer.PrivateKey.Encrypted {
|
||||||
|
return errors.InvalidArgumentError("signing Entity's private key must be decrypted")
|
||||||
|
}
|
||||||
|
ident, ok := e.Identities[identity]
|
||||||
|
if !ok {
|
||||||
|
return errors.InvalidArgumentError("given identity string not found in Entity")
|
||||||
|
}
|
||||||
|
|
||||||
|
sig := &packet.Signature{
|
||||||
|
SigType: packet.SigTypeGenericCert,
|
||||||
|
PubKeyAlgo: signer.PrivateKey.PubKeyAlgo,
|
||||||
|
Hash: config.Hash(),
|
||||||
|
CreationTime: config.Now(),
|
||||||
|
IssuerKeyId: &signer.PrivateKey.KeyId,
|
||||||
|
}
|
||||||
|
if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
ident.Signatures = append(ident.Signatures, sig)
|
||||||
|
return nil
|
||||||
|
}
|
123
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/compressed.go
generated
vendored
Normal file
123
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/compressed.go
generated
vendored
Normal file
|
@ -0,0 +1,123 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package packet
|
||||||
|
|
||||||
|
import (
|
||||||
|
"compress/bzip2"
|
||||||
|
"compress/flate"
|
||||||
|
"compress/zlib"
|
||||||
|
"golang.org/x/crypto/openpgp/errors"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Compressed represents a compressed OpenPGP packet. The decompressed contents
|
||||||
|
// will contain more OpenPGP packets. See RFC 4880, section 5.6.
|
||||||
|
type Compressed struct {
|
||||||
|
Body io.Reader
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
NoCompression = flate.NoCompression
|
||||||
|
BestSpeed = flate.BestSpeed
|
||||||
|
BestCompression = flate.BestCompression
|
||||||
|
DefaultCompression = flate.DefaultCompression
|
||||||
|
)
|
||||||
|
|
||||||
|
// CompressionConfig contains compressor configuration settings.
|
||||||
|
type CompressionConfig struct {
|
||||||
|
// Level is the compression level to use. It must be set to
|
||||||
|
// between -1 and 9, with -1 causing the compressor to use the
|
||||||
|
// default compression level, 0 causing the compressor to use
|
||||||
|
// no compression and 1 to 9 representing increasing (better,
|
||||||
|
// slower) compression levels. If Level is less than -1 or
|
||||||
|
// more then 9, a non-nil error will be returned during
|
||||||
|
// encryption. See the constants above for convenient common
|
||||||
|
// settings for Level.
|
||||||
|
Level int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Compressed) parse(r io.Reader) error {
|
||||||
|
var buf [1]byte
|
||||||
|
_, err := readFull(r, buf[:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch buf[0] {
|
||||||
|
case 1:
|
||||||
|
c.Body = flate.NewReader(r)
|
||||||
|
case 2:
|
||||||
|
c.Body, err = zlib.NewReader(r)
|
||||||
|
case 3:
|
||||||
|
c.Body = bzip2.NewReader(r)
|
||||||
|
default:
|
||||||
|
err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// compressedWriterCloser represents the serialized compression stream
|
||||||
|
// header and the compressor. Its Close() method ensures that both the
|
||||||
|
// compressor and serialized stream header are closed. Its Write()
|
||||||
|
// method writes to the compressor.
|
||||||
|
type compressedWriteCloser struct {
|
||||||
|
sh io.Closer // Stream Header
|
||||||
|
c io.WriteCloser // Compressor
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
|
||||||
|
return cwc.c.Write(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cwc compressedWriteCloser) Close() (err error) {
|
||||||
|
err = cwc.c.Close()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return cwc.sh.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SerializeCompressed serializes a compressed data packet to w and
|
||||||
|
// returns a WriteCloser to which the literal data packets themselves
|
||||||
|
// can be written and which MUST be closed on completion. If cc is
|
||||||
|
// nil, sensible defaults will be used to configure the compression
|
||||||
|
// algorithm.
|
||||||
|
func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
|
||||||
|
compressed, err := serializeStreamHeader(w, packetTypeCompressed)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = compressed.Write([]byte{uint8(algo)})
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
level := DefaultCompression
|
||||||
|
if cc != nil {
|
||||||
|
level = cc.Level
|
||||||
|
}
|
||||||
|
|
||||||
|
var compressor io.WriteCloser
|
||||||
|
switch algo {
|
||||||
|
case CompressionZIP:
|
||||||
|
compressor, err = flate.NewWriter(compressed, level)
|
||||||
|
case CompressionZLIB:
|
||||||
|
compressor, err = zlib.NewWriterLevel(compressed, level)
|
||||||
|
default:
|
||||||
|
s := strconv.Itoa(int(algo))
|
||||||
|
err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
literaldata = compressedWriteCloser{compressed, compressor}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package packet

import (
	"crypto"
	"crypto/rand"
	"io"
	"time"
)

// Config collects a number of parameters along with sensible defaults.
// A nil *Config is valid and results in all default values.
type Config struct {
	// Rand provides the source of entropy.
	// If nil, the crypto/rand Reader is used.
	Rand io.Reader
	// DefaultHash is the default hash function to be used.
	// If zero, SHA-256 is used.
	DefaultHash crypto.Hash
	// DefaultCipher is the cipher to be used.
	// If zero, AES-128 is used.
	DefaultCipher CipherFunction
	// Time returns the current time as the number of seconds since the
	// epoch. If Time is nil, time.Now is used.
	Time func() time.Time
	// DefaultCompressionAlgo is the compression algorithm to be
	// applied to the plaintext before encryption. If zero, no
	// compression is done.
	DefaultCompressionAlgo CompressionAlgo
	// CompressionConfig configures the compression settings.
	CompressionConfig *CompressionConfig
	// S2KCount is only used for symmetric encryption. It
	// determines the strength of the passphrase stretching when
	// the said passphrase is hashed to produce a key. S2KCount
	// should be between 1024 and 65011712, inclusive. If Config
	// is nil or S2KCount is 0, the value 65536 is used. Not all
	// values in the above range can be represented. S2KCount will
	// be rounded up to the next representable value if it cannot
	// be encoded exactly. When set, it is strongly encouraged to
	// use a value that is at least 65536. See RFC 4880 Section
	// 3.7.1.3.
	S2KCount int
	// RSABits is the number of bits in new RSA keys made with NewEntity.
	// If zero, then 2048 bit keys are created.
	RSABits int
}

// Random returns the configured entropy source, falling back to
// crypto/rand's Reader when c is nil or Rand is unset.
func (c *Config) Random() io.Reader {
	if c == nil || c.Rand == nil {
		return rand.Reader
	}
	return c.Rand
}

// Hash returns the configured default hash function, falling back to
// SHA-256 when c is nil or DefaultHash is the zero value.
func (c *Config) Hash() crypto.Hash {
	if c == nil || uint(c.DefaultHash) == 0 {
		return crypto.SHA256
	}
	return c.DefaultHash
}

// Cipher returns the configured default cipher, falling back to AES-128
// when c is nil or DefaultCipher is the zero value.
func (c *Config) Cipher() CipherFunction {
	if c == nil || uint8(c.DefaultCipher) == 0 {
		return CipherAES128
	}
	return c.DefaultCipher
}

// Now returns the current time as seen by the configuration: the result
// of c.Time() when set, otherwise time.Now().
func (c *Config) Now() time.Time {
	if c == nil || c.Time == nil {
		return time.Now()
	}
	return c.Time()
}

// Compression returns the configured compression algorithm; a nil Config
// means CompressionNone (no compression).
func (c *Config) Compression() CompressionAlgo {
	if c == nil {
		return CompressionNone
	}
	return c.DefaultCompressionAlgo
}

// PasswordHashIterations returns the configured S2KCount. It returns 0
// (meaning "use the library default") when c is nil or S2KCount is unset.
func (c *Config) PasswordHashIterations() int {
	if c == nil || c.S2KCount == 0 {
		return 0
	}
	return c.S2KCount
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package packet

import (
	"crypto/rsa"
	"encoding/binary"
	"io"
	"math/big"
	"strconv"

	"golang.org/x/crypto/openpgp/elgamal"
	"golang.org/x/crypto/openpgp/errors"
)

const encryptedKeyVersion = 3

// EncryptedKey represents a public-key encrypted session key. See RFC 4880,
// section 5.1.
type EncryptedKey struct {
	KeyId      uint64
	Algo       PublicKeyAlgorithm
	CipherFunc CipherFunction // only valid after a successful Decrypt
	Key        []byte         // only valid after a successful Decrypt

	encryptedMPI1, encryptedMPI2 parsedMPI
}

// parse reads a version-3 encrypted-key packet body from r: one version
// byte, an 8-byte key id, an algorithm byte and then the algorithm-specific
// encrypted MPIs (one for RSA, two for ElGamal).
func (e *EncryptedKey) parse(r io.Reader) (err error) {
	var buf [10]byte
	_, err = readFull(r, buf[:])
	if err != nil {
		return
	}
	if buf[0] != encryptedKeyVersion {
		return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
	}
	e.KeyId = binary.BigEndian.Uint64(buf[1:9])
	e.Algo = PublicKeyAlgorithm(buf[9])
	switch e.Algo {
	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
		e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
	case PubKeyAlgoElGamal:
		e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
		if err != nil {
			return
		}
		e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r)
	}
	_, err = consumeAll(r)
	return
}

// checksumKeyMaterial returns the RFC 4880 session-key checksum: the sum of
// all key octets, modulo 65536.
func checksumKeyMaterial(key []byte) uint16 {
	var checksum uint16
	for _, v := range key {
		checksum += uint16(v)
	}
	return checksum
}

// Decrypt decrypts an encrypted session key with the given private key. The
// private key must have been decrypted first.
// If config is nil, sensible defaults will be used.
func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
	var err error
	var b []byte

	// TODO(agl): use session key decryption routines here to avoid
	// padding oracle attacks.
	switch priv.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
		b, err = rsa.DecryptPKCS1v15(config.Random(), priv.PrivateKey.(*rsa.PrivateKey), e.encryptedMPI1.bytes)
	case PubKeyAlgoElGamal:
		c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes)
		c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes)
		b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
	default:
		// Message grammar fixed ("cannot decrypted" -> "cannot decrypt").
		err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
	}

	if err != nil {
		return err
	}

	// The decrypted block is: cipher function byte, session key, then a
	// two-byte big-endian checksum of the key material.
	e.CipherFunc = CipherFunction(b[0])
	e.Key = b[1 : len(b)-2]
	expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1])
	checksum := checksumKeyMaterial(e.Key)
	if checksum != expectedChecksum {
		return errors.StructuralError("EncryptedKey checksum incorrect")
	}

	return nil
}

// Serialize writes the encrypted key packet, e, to w.
func (e *EncryptedKey) Serialize(w io.Writer) error {
	var mpiLen int
	switch e.Algo {
	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
		mpiLen = 2 + len(e.encryptedMPI1.bytes)
	case PubKeyAlgoElGamal:
		mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes)
	default:
		return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo)))
	}

	// Propagate the header write error instead of silently discarding it;
	// the original ignored serializeHeader's return value.
	if err := serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen); err != nil {
		return err
	}

	w.Write([]byte{encryptedKeyVersion})
	binary.Write(w, binary.BigEndian, e.KeyId)
	w.Write([]byte{byte(e.Algo)})

	switch e.Algo {
	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
		writeMPIs(w, e.encryptedMPI1)
	case PubKeyAlgoElGamal:
		writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2)
	default:
		panic("internal error") // unreachable: the first switch rejected other algos
	}

	return nil
}

// SerializeEncryptedKey serializes an encrypted key packet to w that contains
// key, encrypted to pub.
// If config is nil, sensible defaults will be used.
func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error {
	var buf [10]byte
	buf[0] = encryptedKeyVersion
	binary.BigEndian.PutUint64(buf[1:9], pub.KeyId)
	buf[9] = byte(pub.PubKeyAlgo)

	// keyBlock is the plaintext to be public-key encrypted: cipher type,
	// session key, then the two-byte key-material checksum.
	keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */)
	keyBlock[0] = byte(cipherFunc)
	copy(keyBlock[1:], key)
	checksum := checksumKeyMaterial(key)
	keyBlock[1+len(key)] = byte(checksum >> 8)
	keyBlock[1+len(key)+1] = byte(checksum)

	switch pub.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
		return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock)
	case PubKeyAlgoElGamal:
		return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock)
	case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly:
		return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
	}

	return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
}

// serializeEncryptedKeyRSA RSA-encrypts keyBlock and writes the complete
// encrypted-key packet (header, fixed fields, ciphertext MPI) to w.
func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error {
	cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock)
	if err != nil {
		return errors.InvalidArgumentError("RSA encryption failed: " + err.Error())
	}

	packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText)

	err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
	if err != nil {
		return err
	}
	_, err = w.Write(header[:])
	if err != nil {
		return err
	}
	return writeMPI(w, 8*uint16(len(cipherText)), cipherText)
}

// serializeEncryptedKeyElGamal ElGamal-encrypts keyBlock and writes the
// complete encrypted-key packet (header, fixed fields, two MPIs) to w.
func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error {
	c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock)
	if err != nil {
		return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error())
	}

	packetLen := 10 /* header length */
	packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8
	packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8

	err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
	if err != nil {
		return err
	}
	_, err = w.Write(header[:])
	if err != nil {
		return err
	}
	err = writeBig(w, c1)
	if err != nil {
		return err
	}
	return writeBig(w, c2)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package packet

import (
	"encoding/binary"
	"io"
)

// LiteralData represents an encrypted file. See RFC 4880, section 5.9.
type LiteralData struct {
	IsBinary bool
	FileName string
	Time     uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined.
	Body     io.Reader
}

// ForEyesOnly returns whether the contents of the LiteralData have been marked
// as especially sensitive.
func (l *LiteralData) ForEyesOnly() bool {
	return l.FileName == "_CONSOLE"
}

// parse decodes the literal-data header from r: a format byte ('b' for
// binary), a one-byte file-name length, the file name, and a 4-byte
// big-endian timestamp. The remainder of r becomes Body.
func (l *LiteralData) parse(r io.Reader) (err error) {
	// 256 bytes is enough for the longest possible file name (255 bytes).
	var buf [256]byte

	_, err = readFull(r, buf[:2])
	if err != nil {
		return
	}

	l.IsBinary = buf[0] == 'b'
	fileNameLen := int(buf[1])

	_, err = readFull(r, buf[:fileNameLen])
	if err != nil {
		return
	}

	l.FileName = string(buf[:fileNameLen])

	_, err = readFull(r, buf[:4])
	if err != nil {
		return
	}

	l.Time = binary.BigEndian.Uint32(buf[:4])
	l.Body = r
	return
}

// SerializeLiteral serializes a literal data packet to w and returns a
// WriteCloser to which the data itself can be written and which MUST be closed
// on completion. The fileName is truncated to 255 bytes.
func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
	var buf [4]byte
	// Format byte: 't' for text, 'b' for binary.
	buf[0] = 't'
	if isBinary {
		buf[0] = 'b'
	}
	// The file-name length field is a single byte, hence the 255 cap.
	if len(fileName) > 255 {
		fileName = fileName[:255]
	}
	buf[1] = byte(len(fileName))

	inner, err := serializeStreamHeader(w, packetTypeLiteralData)
	if err != nil {
		return
	}

	_, err = inner.Write(buf[:2])
	if err != nil {
		return
	}
	_, err = inner.Write([]byte(fileName))
	if err != nil {
		return
	}
	binary.BigEndian.PutUint32(buf[:], time)
	_, err = inner.Write(buf[:])
	if err != nil {
		return
	}

	plaintext = inner
	return
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9

package packet

import (
	"crypto/cipher"
)

// ocfbEncrypter is the encrypting half of OpenPGP CFB mode; fre holds the
// current encrypted feedback register and outUsed counts how many of its
// bytes have been consumed.
type ocfbEncrypter struct {
	b       cipher.Block
	fre     []byte
	outUsed int
}

// An OCFBResyncOption determines if the "resynchronization step" of OCFB is
// performed.
type OCFBResyncOption bool

const (
	OCFBResync   OCFBResyncOption = true
	OCFBNoResync OCFBResyncOption = false
)

// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's
// cipher feedback mode using the given cipher.Block, and an initial amount of
// ciphertext. randData must be random bytes and be the same length as the
// cipher.Block's block size. Resync determines if the "resynchronization step"
// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on
// this point.
func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) {
	blockSize := block.BlockSize()
	if len(randData) != blockSize {
		return nil, nil
	}

	x := &ocfbEncrypter{
		b:       block,
		fre:     make([]byte, blockSize),
		outUsed: 0,
	}
	// prefix carries the encrypted random block plus a two-byte repetition
	// of its last two bytes (the "quick check" bytes).
	prefix := make([]byte, blockSize+2)

	block.Encrypt(x.fre, x.fre)
	for i := 0; i < blockSize; i++ {
		prefix[i] = randData[i] ^ x.fre[i]
	}

	block.Encrypt(x.fre, prefix[:blockSize])
	prefix[blockSize] = x.fre[0] ^ randData[blockSize-2]
	prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1]

	if resync {
		block.Encrypt(x.fre, prefix[2:])
	} else {
		x.fre[0] = prefix[blockSize]
		x.fre[1] = prefix[blockSize+1]
		x.outUsed = 2
	}
	return x, prefix
}

// XORKeyStream encrypts src into dst, re-encrypting the feedback register
// each time all of its bytes have been used.
func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) {
	for i := 0; i < len(src); i++ {
		if x.outUsed == len(x.fre) {
			x.b.Encrypt(x.fre, x.fre)
			x.outUsed = 0
		}

		// In CFB encryption the ciphertext byte is fed back, so the XOR is
		// stored into the register before being copied to dst.
		x.fre[x.outUsed] ^= src[i]
		dst[i] = x.fre[x.outUsed]
		x.outUsed++
	}
}

// ocfbDecrypter mirrors ocfbEncrypter for the decrypting direction.
type ocfbDecrypter struct {
	b       cipher.Block
	fre     []byte
	outUsed int
}

// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's
// cipher feedback mode using the given cipher.Block. Prefix must be the first
// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's
// block size. If an incorrect key is detected then nil is returned. On
// successful exit, blockSize+2 bytes of decrypted data are written into
// prefix. Resync determines if the "resynchronization step" from RFC 4880,
// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point.
func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream {
	blockSize := block.BlockSize()
	if len(prefix) != blockSize+2 {
		return nil
	}

	x := &ocfbDecrypter{
		b:       block,
		fre:     make([]byte, blockSize),
		outUsed: 0,
	}
	prefixCopy := make([]byte, len(prefix))
	copy(prefixCopy, prefix)

	block.Encrypt(x.fre, x.fre)
	for i := 0; i < blockSize; i++ {
		prefixCopy[i] ^= x.fre[i]
	}

	block.Encrypt(x.fre, prefix[:blockSize])
	prefixCopy[blockSize] ^= x.fre[0]
	prefixCopy[blockSize+1] ^= x.fre[1]

	// Quick key check: the last two bytes of the random prefix are repeated
	// in the two check bytes; a mismatch signals a wrong key.
	if prefixCopy[blockSize-2] != prefixCopy[blockSize] ||
		prefixCopy[blockSize-1] != prefixCopy[blockSize+1] {
		return nil
	}

	if resync {
		block.Encrypt(x.fre, prefix[2:])
	} else {
		x.fre[0] = prefix[blockSize]
		x.fre[1] = prefix[blockSize+1]
		x.outUsed = 2
	}
	// Report the decrypted prefix back to the caller in place.
	copy(prefix, prefixCopy)
	return x
}

// XORKeyStream decrypts src into dst; each consumed ciphertext byte is saved
// into the feedback register for the next block encryption.
func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) {
	for i := 0; i < len(src); i++ {
		if x.outUsed == len(x.fre) {
			x.b.Encrypt(x.fre, x.fre)
			x.outUsed = 0
		}

		// c is captured first so that dst and src may alias.
		c := src[i]
		dst[i] = x.fre[x.outUsed] ^ src[i]
		x.fre[x.outUsed] = c
		x.outUsed++
	}
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package packet

import (
	"crypto"
	"encoding/binary"
	"golang.org/x/crypto/openpgp/errors"
	"golang.org/x/crypto/openpgp/s2k"
	"io"
	"strconv"
)

// OnePassSignature represents a one-pass signature packet. See RFC 4880,
// section 5.4.
type OnePassSignature struct {
	SigType    SignatureType
	Hash       crypto.Hash
	PubKeyAlgo PublicKeyAlgorithm
	KeyId      uint64
	IsLast     bool
}

const onePassSignatureVersion = 3

// parse decodes the fixed 13-byte body of a one-pass signature packet:
// version, signature type, hash id, public-key algorithm, key id, and the
// "is last" flag.
func (ops *OnePassSignature) parse(r io.Reader) (err error) {
	var buf [13]byte

	_, err = readFull(r, buf[:])
	if err != nil {
		return
	}
	if buf[0] != onePassSignatureVersion {
		// NOTE(review): err is set but there is no return here, so the rest
		// of the packet is still decoded; the version error is returned at
		// the end unless the hash lookup below fails first. Confirm this is
		// intentional before changing it.
		err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0])))
	}

	var ok bool
	ops.Hash, ok = s2k.HashIdToHash(buf[2])
	if !ok {
		return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2])))
	}

	ops.SigType = SignatureType(buf[1])
	ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3])
	ops.KeyId = binary.BigEndian.Uint64(buf[4:12])
	ops.IsLast = buf[12] != 0
	return
}

// Serialize marshals the given OnePassSignature to w.
func (ops *OnePassSignature) Serialize(w io.Writer) error {
	var buf [13]byte
	buf[0] = onePassSignatureVersion
	buf[1] = uint8(ops.SigType)
	var ok bool
	buf[2], ok = s2k.HashToHashId(ops.Hash)
	if !ok {
		return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash)))
	}
	buf[3] = uint8(ops.PubKeyAlgo)
	binary.BigEndian.PutUint64(buf[4:12], ops.KeyId)
	if ops.IsLast {
		buf[12] = 1
	}

	if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil {
		return err
	}
	_, err := w.Write(buf[:])
	return err
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package packet

import (
	"bytes"
	"io"
	"io/ioutil"

	"golang.org/x/crypto/openpgp/errors"
)

// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is
// useful for splitting and storing the original packet contents separately,
// handling unsupported packet types or accessing parts of the packet not yet
// implemented by this package.
type OpaquePacket struct {
	// Packet type
	Tag uint8
	// Reason why the packet was parsed opaquely
	Reason error
	// Binary contents of the packet data
	Contents []byte
}

// parse slurps the entire packet body into Contents without interpreting it.
func (op *OpaquePacket) parse(r io.Reader) (err error) {
	op.Contents, err = ioutil.ReadAll(r)
	return
}

// Serialize marshals the packet to a writer in its original form, including
// the packet header.
func (op *OpaquePacket) Serialize(w io.Writer) (err error) {
	err = serializeHeader(w, packetType(op.Tag), len(op.Contents))
	if err == nil {
		_, err = w.Write(op.Contents)
	}
	return
}

// Parse attempts to parse the opaque contents into a structure supported by
// this package. If the packet is not known then the result will be another
// OpaquePacket.
func (op *OpaquePacket) Parse() (p Packet, err error) {
	// Rebuild a header + body stream and feed it back through the normal
	// packet reader.
	hdr := bytes.NewBuffer(nil)
	err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents))
	if err != nil {
		op.Reason = err
		return op, err
	}
	p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents)))
	if err != nil {
		// Fall back to returning the opaque packet itself, recording why.
		op.Reason = err
		p = op
	}
	return
}

// OpaqueReader reads OpaquePackets from an io.Reader.
type OpaqueReader struct {
	r io.Reader
}

// NewOpaqueReader returns an OpaqueReader that reads packets from r.
func NewOpaqueReader(r io.Reader) *OpaqueReader {
	return &OpaqueReader{r: r}
}

// Read the next OpaquePacket.
func (or *OpaqueReader) Next() (op *OpaquePacket, err error) {
	tag, _, contents, err := readHeader(or.r)
	if err != nil {
		return
	}
	op = &OpaquePacket{Tag: uint8(tag), Reason: err}
	err = op.parse(contents)
	if err != nil {
		// Drain the remaining body so the stream stays positioned at the
		// next packet.
		consumeAll(contents)
	}
	return
}

// OpaqueSubpacket represents an unparsed OpenPGP subpacket,
// as found in signature and user attribute packets.
type OpaqueSubpacket struct {
	SubType  uint8
	Contents []byte
}

// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from
// their byte representation.
func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) {
	var (
		subHeaderLen int
		subPacket    *OpaqueSubpacket
	)
	for len(contents) > 0 {
		subHeaderLen, subPacket, err = nextSubpacket(contents)
		if err != nil {
			break
		}
		result = append(result, subPacket)
		contents = contents[subHeaderLen+len(subPacket.Contents):]
	}
	return
}

// nextSubpacket decodes a single subpacket header (one of the three length
// encodings of RFC 4880, section 5.2.3.1) plus its body from contents.
func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) {
	// RFC 4880, section 5.2.3.1
	var subLen uint32
	if len(contents) < 1 {
		goto Truncated
	}
	subPacket = &OpaqueSubpacket{}
	switch {
	case contents[0] < 192:
		subHeaderLen = 2 // 1 length byte, 1 subtype byte
		if len(contents) < subHeaderLen {
			goto Truncated
		}
		subLen = uint32(contents[0])
		contents = contents[1:]
	case contents[0] < 255:
		subHeaderLen = 3 // 2 length bytes, 1 subtype
		if len(contents) < subHeaderLen {
			goto Truncated
		}
		subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192
		contents = contents[2:]
	default:
		subHeaderLen = 6 // 5 length bytes, 1 subtype
		if len(contents) < subHeaderLen {
			goto Truncated
		}
		subLen = uint32(contents[1])<<24 |
			uint32(contents[2])<<16 |
			uint32(contents[3])<<8 |
			uint32(contents[4])
		contents = contents[5:]
	}
	// subLen includes the subtype byte, so it must be at least 1 and fit in
	// what remains.
	if subLen > uint32(len(contents)) || subLen == 0 {
		goto Truncated
	}
	subPacket.SubType = contents[0]
	subPacket.Contents = contents[1:subLen]
	return
Truncated:
	err = errors.StructuralError("subpacket truncated")
	return
}

// Serialize writes the subpacket (length, subtype, contents) to w.
func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) {
	buf := make([]byte, 6)
	n := serializeSubpacketLength(buf, len(osp.Contents)+1)
	buf[n] = osp.SubType
	if _, err = w.Write(buf[:n+1]); err != nil {
		return
	}
	_, err = w.Write(osp.Contents)
	return
}
537
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/packet.go
generated
vendored
Normal file
537
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/packet.go
generated
vendored
Normal file
|
@ -0,0 +1,537 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package packet implements parsing and serialization of OpenPGP packets, as
|
||||||
|
// specified in RFC 4880.
|
||||||
|
package packet // import "golang.org/x/crypto/openpgp/packet"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"crypto/aes"
|
||||||
|
"crypto/cipher"
|
||||||
|
"crypto/des"
|
||||||
|
"golang.org/x/crypto/cast5"
|
||||||
|
"golang.org/x/crypto/openpgp/errors"
|
||||||
|
"io"
|
||||||
|
"math/big"
|
||||||
|
)
|
||||||
|
|
||||||
|
// readFull is the same as io.ReadFull except that reading zero bytes returns
|
||||||
|
// ErrUnexpectedEOF rather than EOF.
|
||||||
|
func readFull(r io.Reader, buf []byte) (n int, err error) {
|
||||||
|
n, err = io.ReadFull(r, buf)
|
||||||
|
if err == io.EOF {
|
||||||
|
err = io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2.
|
||||||
|
func readLength(r io.Reader) (length int64, isPartial bool, err error) {
|
||||||
|
var buf [4]byte
|
||||||
|
_, err = readFull(r, buf[:1])
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case buf[0] < 192:
|
||||||
|
length = int64(buf[0])
|
||||||
|
case buf[0] < 224:
|
||||||
|
length = int64(buf[0]-192) << 8
|
||||||
|
_, err = readFull(r, buf[0:1])
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
length += int64(buf[0]) + 192
|
||||||
|
case buf[0] < 255:
|
||||||
|
length = int64(1) << (buf[0] & 0x1f)
|
||||||
|
isPartial = true
|
||||||
|
default:
|
||||||
|
_, err = readFull(r, buf[0:4])
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
length = int64(buf[0])<<24 |
|
||||||
|
int64(buf[1])<<16 |
|
||||||
|
int64(buf[2])<<8 |
|
||||||
|
int64(buf[3])
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths.
|
||||||
|
// The continuation lengths are parsed and removed from the stream and EOF is
|
||||||
|
// returned at the end of the packet. See RFC 4880, section 4.2.2.4.
|
||||||
|
type partialLengthReader struct {
|
||||||
|
r io.Reader
|
||||||
|
remaining int64
|
||||||
|
isPartial bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *partialLengthReader) Read(p []byte) (n int, err error) {
|
||||||
|
for r.remaining == 0 {
|
||||||
|
if !r.isPartial {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
r.remaining, r.isPartial, err = readLength(r.r)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
toRead := int64(len(p))
|
||||||
|
if toRead > r.remaining {
|
||||||
|
toRead = r.remaining
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err = r.r.Read(p[:int(toRead)])
|
||||||
|
r.remaining -= int64(n)
|
||||||
|
if n < int(toRead) && err == io.EOF {
|
||||||
|
err = io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// partialLengthWriter writes a stream of data using OpenPGP partial lengths.
// See RFC 4880, section 4.2.2.4.
type partialLengthWriter struct {
	w          io.WriteCloser
	lengthByte [1]byte // scratch buffer for the one-octet partial-length header
}

// Write emits p as a sequence of partial-length chunks, each a power of two
// between 2^0 and 2^14 bytes, preceded by its one-byte length header
// (224 + log2(chunk)).
func (w *partialLengthWriter) Write(p []byte) (n int, err error) {
	for len(p) > 0 {
		// Find the largest power-of-two chunk (capped at 2^14) that fits in p.
		// power is unsigned: after 0 the decrement wraps to a huge value, so
		// the `power < 32` test terminates the loop. Since 2^0 == 1, any
		// non-empty p always matches some chunk size.
		for power := uint(14); power < 32; power-- {
			l := 1 << power
			if len(p) >= l {
				w.lengthByte[0] = 224 + uint8(power)
				_, err = w.w.Write(w.lengthByte[:])
				if err != nil {
					return
				}
				var m int
				m, err = w.w.Write(p[:l])
				n += m
				if err != nil {
					return
				}
				p = p[l:]
				break
			}
		}
	}
	return
}

// Close terminates the partial-length stream with a zero-length final
// chunk header, then closes the underlying writer.
func (w *partialLengthWriter) Close() error {
	w.lengthByte[0] = 0
	_, err := w.w.Write(w.lengthByte[:])
	if err != nil {
		return err
	}
	return w.w.Close()
}
|
||||||
|
|
||||||
|
// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the
// underlying Reader returns EOF before the limit has been reached.
type spanReader struct {
	r io.Reader
	n int64
}

// Read satisfies io.Reader, capping reads at the remaining byte budget.
func (l *spanReader) Read(p []byte) (n int, err error) {
	if l.n <= 0 {
		return 0, io.EOF
	}
	if int64(len(p)) > l.n {
		p = p[:l.n]
	}
	n, err = l.r.Read(p)
	l.n -= int64(n)
	// EOF before the span is exhausted indicates a truncated packet.
	if err == io.EOF && l.n > 0 {
		err = io.ErrUnexpectedEOF
	}
	return n, err
}
|
||||||
|
|
||||||
|
// readHeader parses a packet header and returns an io.Reader which will return
// the contents of the packet. See RFC 4880, section 4.2.
//
// length is -1 when it cannot be known up front (old-format indeterminate
// length, or a new-format partial-length stream); otherwise contents is
// limited to exactly length bytes.
func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) {
	var buf [4]byte
	_, err = io.ReadFull(r, buf[:1])
	if err != nil {
		return
	}
	// Bit 7 of the tag octet must always be set. See RFC 4880, section 4.2.
	if buf[0]&0x80 == 0 {
		err = errors.StructuralError("tag byte does not have MSB set")
		return
	}
	// Bit 6 distinguishes new-format (set) from old-format (clear) headers.
	if buf[0]&0x40 == 0 {
		// Old format packet
		tag = packetType((buf[0] & 0x3f) >> 2)
		lengthType := buf[0] & 3
		if lengthType == 3 {
			// Indeterminate length: the packet runs to the end of the input.
			length = -1
			contents = r
			return
		}
		// Length occupies 1, 2 or 4 octets, big-endian.
		lengthBytes := 1 << lengthType
		_, err = readFull(r, buf[0:lengthBytes])
		if err != nil {
			return
		}
		for i := 0; i < lengthBytes; i++ {
			length <<= 8
			length |= int64(buf[i])
		}
		contents = &spanReader{r, length}
		return
	}

	// New format packet
	tag = packetType(buf[0] & 0x3f)
	length, isPartial, err := readLength(r)
	if err != nil {
		return
	}
	if isPartial {
		// Partial lengths: wrap the stream so continuation headers are
		// consumed transparently; total length is unknown.
		contents = &partialLengthReader{
			remaining: length,
			isPartial: true,
			r:         r,
		}
		length = -1
	} else {
		contents = &spanReader{r, length}
	}
	return
}
|
||||||
|
|
||||||
|
// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section
|
||||||
|
// 4.2.
|
||||||
|
func serializeHeader(w io.Writer, ptype packetType, length int) (err error) {
|
||||||
|
var buf [6]byte
|
||||||
|
var n int
|
||||||
|
|
||||||
|
buf[0] = 0x80 | 0x40 | byte(ptype)
|
||||||
|
if length < 192 {
|
||||||
|
buf[1] = byte(length)
|
||||||
|
n = 2
|
||||||
|
} else if length < 8384 {
|
||||||
|
length -= 192
|
||||||
|
buf[1] = 192 + byte(length>>8)
|
||||||
|
buf[2] = byte(length)
|
||||||
|
n = 3
|
||||||
|
} else {
|
||||||
|
buf[1] = 255
|
||||||
|
buf[2] = byte(length >> 24)
|
||||||
|
buf[3] = byte(length >> 16)
|
||||||
|
buf[4] = byte(length >> 8)
|
||||||
|
buf[5] = byte(length)
|
||||||
|
n = 6
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = w.Write(buf[:n])
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// serializeStreamHeader writes an OpenPGP packet header to w where the
|
||||||
|
// length of the packet is unknown. It returns a io.WriteCloser which can be
|
||||||
|
// used to write the contents of the packet. See RFC 4880, section 4.2.
|
||||||
|
func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) {
|
||||||
|
var buf [1]byte
|
||||||
|
buf[0] = 0x80 | 0x40 | byte(ptype)
|
||||||
|
_, err = w.Write(buf[:])
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
out = &partialLengthWriter{w: w}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Packet represents an OpenPGP packet. Users are expected to try casting
// instances of this interface to specific packet types.
type Packet interface {
	// parse reads the packet body from the given reader; implemented by
	// each concrete packet type.
	parse(io.Reader) error
}
|
||||||
|
|
||||||
|
// consumeAll reads from the given Reader until error, returning the number of
// bytes read. io.EOF is treated as success and not reported as an error.
func consumeAll(r io.Reader) (n int64, err error) {
	var scratch [1024]byte
	for {
		m, e := r.Read(scratch[:])
		n += int64(m)
		switch e {
		case nil:
			// keep draining
		case io.EOF:
			return n, nil
		default:
			return n, e
		}
	}
}
|
||||||
|
|
||||||
|
// packetType represents the numeric ids of the different OpenPGP packet types. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2
type packetType uint8

// Packet tag values from RFC 4880, section 4.3. (Tags 10, 12, 15 and 16 are
// intentionally absent — they are not handled by this package.)
const (
	packetTypeEncryptedKey              packetType = 1
	packetTypeSignature                 packetType = 2
	packetTypeSymmetricKeyEncrypted     packetType = 3
	packetTypeOnePassSignature          packetType = 4
	packetTypePrivateKey                packetType = 5
	packetTypePublicKey                 packetType = 6
	packetTypePrivateSubkey             packetType = 7
	packetTypeCompressed                packetType = 8
	packetTypeSymmetricallyEncrypted    packetType = 9
	packetTypeLiteralData               packetType = 11
	packetTypeUserId                    packetType = 13
	packetTypePublicSubkey              packetType = 14
	packetTypeUserAttribute             packetType = 17
	packetTypeSymmetricallyEncryptedMDC packetType = 18
)
|
||||||
|
|
||||||
|
// peekVersion detects the version of a public key packet about to
// be read. A bufio.Reader at the original position of the io.Reader
// is returned.
func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) {
	bufr = bufio.NewReader(r)
	peeked, err := bufr.Peek(1)
	if err != nil {
		return bufr, 0, err
	}
	return bufr, peeked[0], nil
}
|
||||||
|
|
||||||
|
// Read reads a single OpenPGP packet from the given io.Reader. If there is an
// error parsing a packet, the whole packet is consumed from the input.
func Read(r io.Reader) (p Packet, err error) {
	tag, _, contents, err := readHeader(r)
	if err != nil {
		return
	}

	// Dispatch on the packet tag to allocate the matching concrete type.
	switch tag {
	case packetTypeEncryptedKey:
		p = new(EncryptedKey)
	case packetTypeSignature:
		var version byte
		// Detect signature version: v3 and v4 signatures have different
		// wire formats, so peek the first byte without consuming it.
		if contents, version, err = peekVersion(contents); err != nil {
			return
		}
		if version < 4 {
			p = new(SignatureV3)
		} else {
			p = new(Signature)
		}
	case packetTypeSymmetricKeyEncrypted:
		p = new(SymmetricKeyEncrypted)
	case packetTypeOnePassSignature:
		p = new(OnePassSignature)
	case packetTypePrivateKey, packetTypePrivateSubkey:
		pk := new(PrivateKey)
		if tag == packetTypePrivateSubkey {
			pk.IsSubkey = true
		}
		p = pk
	case packetTypePublicKey, packetTypePublicSubkey:
		var version byte
		// Same version peek as for signatures: v3 vs v4 key formats differ.
		if contents, version, err = peekVersion(contents); err != nil {
			return
		}
		isSubkey := tag == packetTypePublicSubkey
		if version < 4 {
			p = &PublicKeyV3{IsSubkey: isSubkey}
		} else {
			p = &PublicKey{IsSubkey: isSubkey}
		}
	case packetTypeCompressed:
		p = new(Compressed)
	case packetTypeSymmetricallyEncrypted:
		p = new(SymmetricallyEncrypted)
	case packetTypeLiteralData:
		p = new(LiteralData)
	case packetTypeUserId:
		p = new(UserId)
	case packetTypeUserAttribute:
		p = new(UserAttribute)
	case packetTypeSymmetricallyEncryptedMDC:
		se := new(SymmetricallyEncrypted)
		se.MDC = true
		p = se
	default:
		err = errors.UnknownPacketTypeError(tag)
	}
	if p != nil {
		err = p.parse(contents)
	}
	// On any failure, drain the rest of the packet so the stream stays
	// positioned at the next packet boundary. The drain result is
	// deliberately ignored; the original error is returned.
	if err != nil {
		consumeAll(contents)
	}
	return
}
|
||||||
|
|
||||||
|
// SignatureType represents the different semantic meanings of an OpenPGP
// signature. See RFC 4880, section 5.2.1.
type SignatureType uint8

// Signature type values from RFC 4880, section 5.2.1. Note that only
// SigTypeBinary is explicitly typed; the rest take its type via the
// const block.
const (
	SigTypeBinary            SignatureType = 0
	SigTypeText                            = 1
	SigTypeGenericCert                     = 0x10
	SigTypePersonaCert                     = 0x11
	SigTypeCasualCert                      = 0x12
	SigTypePositiveCert                    = 0x13
	SigTypeSubkeyBinding                   = 0x18
	SigTypePrimaryKeyBinding               = 0x19
	SigTypeDirectSignature                 = 0x1F
	SigTypeKeyRevocation                   = 0x20
	SigTypeSubkeyRevocation                = 0x28
)
|
||||||
|
|
||||||
|
// PublicKeyAlgorithm represents the different public key system specified for
// OpenPGP. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12
type PublicKeyAlgorithm uint8

const (
	PubKeyAlgoRSA            PublicKeyAlgorithm = 1
	PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
	PubKeyAlgoRSASignOnly    PublicKeyAlgorithm = 3
	PubKeyAlgoElGamal        PublicKeyAlgorithm = 16
	PubKeyAlgoDSA            PublicKeyAlgorithm = 17
	// RFC 6637, Section 5.
	PubKeyAlgoECDH  PublicKeyAlgorithm = 18
	PubKeyAlgoECDSA PublicKeyAlgorithm = 19
)
|
||||||
|
|
||||||
|
// CanEncrypt returns true if it's possible to encrypt a message to a public
|
||||||
|
// key of the given type.
|
||||||
|
func (pka PublicKeyAlgorithm) CanEncrypt() bool {
|
||||||
|
switch pka {
|
||||||
|
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// CanSign returns true if it's possible for a public key of the given type to
|
||||||
|
// sign a message.
|
||||||
|
func (pka PublicKeyAlgorithm) CanSign() bool {
|
||||||
|
switch pka {
|
||||||
|
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// CipherFunction represents the different block ciphers specified for OpenPGP. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13
type CipherFunction uint8

// Symmetric cipher algorithm ids from RFC 4880, section 9.2.
const (
	Cipher3DES   CipherFunction = 2
	CipherCAST5  CipherFunction = 3
	CipherAES128 CipherFunction = 7
	CipherAES192 CipherFunction = 8
	CipherAES256 CipherFunction = 9
)
|
||||||
|
|
||||||
|
// KeySize returns the key size, in bytes, of cipher.
|
||||||
|
func (cipher CipherFunction) KeySize() int {
|
||||||
|
switch cipher {
|
||||||
|
case Cipher3DES:
|
||||||
|
return 24
|
||||||
|
case CipherCAST5:
|
||||||
|
return cast5.KeySize
|
||||||
|
case CipherAES128:
|
||||||
|
return 16
|
||||||
|
case CipherAES192:
|
||||||
|
return 24
|
||||||
|
case CipherAES256:
|
||||||
|
return 32
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// blockSize returns the block size, in bytes, of cipher.
|
||||||
|
func (cipher CipherFunction) blockSize() int {
|
||||||
|
switch cipher {
|
||||||
|
case Cipher3DES:
|
||||||
|
return des.BlockSize
|
||||||
|
case CipherCAST5:
|
||||||
|
return 8
|
||||||
|
case CipherAES128, CipherAES192, CipherAES256:
|
||||||
|
return 16
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// new returns a fresh instance of the given cipher.
|
||||||
|
func (cipher CipherFunction) new(key []byte) (block cipher.Block) {
|
||||||
|
switch cipher {
|
||||||
|
case Cipher3DES:
|
||||||
|
block, _ = des.NewTripleDESCipher(key)
|
||||||
|
case CipherCAST5:
|
||||||
|
block, _ = cast5.NewCipher(key)
|
||||||
|
case CipherAES128, CipherAES192, CipherAES256:
|
||||||
|
block, _ = aes.NewCipher(key)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// readMPI reads a big integer from r. The bit length returned is the bit
|
||||||
|
// length that was specified in r. This is preserved so that the integer can be
|
||||||
|
// reserialized exactly.
|
||||||
|
func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) {
|
||||||
|
var buf [2]byte
|
||||||
|
_, err = readFull(r, buf[0:])
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
bitLength = uint16(buf[0])<<8 | uint16(buf[1])
|
||||||
|
numBytes := (int(bitLength) + 7) / 8
|
||||||
|
mpi = make([]byte, numBytes)
|
||||||
|
_, err = readFull(r, mpi)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// mpiLength returns the length of the given *big.Int when serialized as an
// MPI.
func mpiLength(n *big.Int) (mpiLengthInBytes int) {
	// Two bytes of length prefix plus the minimal big-endian magnitude.
	return 2 + (n.BitLen()+7)/8
}
|
||||||
|
|
||||||
|
// writeMPI serializes a big integer to w: a two-octet big-endian bit count
// followed by the magnitude bytes.
func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) {
	header := []byte{byte(bitLength >> 8), byte(bitLength)}
	if _, err = w.Write(header); err != nil {
		return err
	}
	_, err = w.Write(mpiBytes)
	return err
}
|
||||||
|
|
||||||
|
// writeBig serializes a *big.Int to w.
|
||||||
|
func writeBig(w io.Writer, i *big.Int) error {
|
||||||
|
return writeMPI(w, uint16(i.BitLen()), i.Bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
// CompressionAlgo Represents the different compression algorithms
// supported by OpenPGP (except for BZIP2, which is not currently
// supported). See Section 9.3 of RFC 4880.
type CompressionAlgo uint8

const (
	CompressionNone CompressionAlgo = 0
	CompressionZIP  CompressionAlgo = 1
	CompressionZLIB CompressionAlgo = 2
)
|
380
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
generated
vendored
Normal file
380
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
generated
vendored
Normal file
|
@ -0,0 +1,380 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package packet
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto"
|
||||||
|
"crypto/cipher"
|
||||||
|
"crypto/dsa"
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"crypto/rsa"
|
||||||
|
"crypto/sha1"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/big"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/crypto/openpgp/elgamal"
|
||||||
|
"golang.org/x/crypto/openpgp/errors"
|
||||||
|
"golang.org/x/crypto/openpgp/s2k"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PrivateKey represents a possibly encrypted private key. See RFC 4880,
// section 5.5.3.
type PrivateKey struct {
	PublicKey
	Encrypted     bool // if true then the private key is unavailable until Decrypt has been called.
	encryptedData []byte
	// cipher and s2k describe how encryptedData was protected; populated
	// by parse when the key material is passphrase-encrypted.
	cipher     CipherFunction
	s2k        func(out, in []byte)
	PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or a crypto.Signer.
	// sha1Checksum is true when the key material ends with a SHA-1 hash
	// (s2k type 254) rather than the 16-bit additive checksum.
	sha1Checksum bool
	iv           []byte
}
|
||||||
|
|
||||||
|
func NewRSAPrivateKey(currentTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
|
||||||
|
pk := new(PrivateKey)
|
||||||
|
pk.PublicKey = *NewRSAPublicKey(currentTime, &priv.PublicKey)
|
||||||
|
pk.PrivateKey = priv
|
||||||
|
return pk
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewDSAPrivateKey(currentTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
|
||||||
|
pk := new(PrivateKey)
|
||||||
|
pk.PublicKey = *NewDSAPublicKey(currentTime, &priv.PublicKey)
|
||||||
|
pk.PrivateKey = priv
|
||||||
|
return pk
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewElGamalPrivateKey(currentTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
|
||||||
|
pk := new(PrivateKey)
|
||||||
|
pk.PublicKey = *NewElGamalPublicKey(currentTime, &priv.PublicKey)
|
||||||
|
pk.PrivateKey = priv
|
||||||
|
return pk
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey {
|
||||||
|
pk := new(PrivateKey)
|
||||||
|
pk.PublicKey = *NewECDSAPublicKey(currentTime, &priv.PublicKey)
|
||||||
|
pk.PrivateKey = priv
|
||||||
|
return pk
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSignerPrivateKey creates a sign-only PrivateKey from a crypto.Signer that
// implements RSA or ECDSA. It panics on any other signer type.
func NewSignerPrivateKey(currentTime time.Time, signer crypto.Signer) *PrivateKey {
	pk := new(PrivateKey)
	// NOTE(review): this switch matches on the *value* types rsa.PublicKey /
	// ecdsa.PublicKey. A Signer whose Public() returns a pointer
	// (*rsa.PublicKey etc.) falls through to the panic branch — confirm
	// against the signers used by callers.
	switch pubkey := signer.Public().(type) {
	case rsa.PublicKey:
		pk.PublicKey = *NewRSAPublicKey(currentTime, &pubkey)
		// Override the algorithm set by NewRSAPublicKey: this key signs only.
		pk.PubKeyAlgo = PubKeyAlgoRSASignOnly
	case ecdsa.PublicKey:
		pk.PublicKey = *NewECDSAPublicKey(currentTime, &pubkey)
	default:
		panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey")
	}
	pk.PrivateKey = signer
	return pk
}
|
||||||
|
|
||||||
|
// parse reads a private key packet body from r. It first parses the embedded
// public key, then the string-to-key (s2k) usage octet, optional cipher/IV,
// and finally buffers the (possibly encrypted) secret key material. An
// unencrypted key is decoded immediately; an encrypted one is kept until
// Decrypt is called. See RFC 4880, section 5.5.3.
func (pk *PrivateKey) parse(r io.Reader) (err error) {
	err = (&pk.PublicKey).parse(r)
	if err != nil {
		return
	}
	var buf [1]byte
	_, err = readFull(r, buf[:])
	if err != nil {
		return
	}

	// The s2k usage octet: 0 = plaintext, 254/255 = s2k-derived cipher key
	// (254 additionally means a SHA-1 checksum follows the key material).
	s2kType := buf[0]

	switch s2kType {
	case 0:
		pk.s2k = nil
		pk.Encrypted = false
	case 254, 255:
		_, err = readFull(r, buf[:])
		if err != nil {
			return
		}
		pk.cipher = CipherFunction(buf[0])
		pk.Encrypted = true
		pk.s2k, err = s2k.Parse(r)
		if err != nil {
			return
		}
		if s2kType == 254 {
			pk.sha1Checksum = true
		}
	default:
		// Any other value means the cipher id itself was stored here — a
		// deprecated form this package refuses to handle.
		return errors.UnsupportedError("deprecated s2k function in private key")
	}

	if pk.Encrypted {
		blockSize := pk.cipher.blockSize()
		if blockSize == 0 {
			return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher)))
		}
		// A CFB IV of one cipher block precedes the encrypted material.
		pk.iv = make([]byte, blockSize)
		_, err = readFull(r, pk.iv)
		if err != nil {
			return
		}
	}

	// The remainder of the packet is the secret key material (plus checksum).
	pk.encryptedData, err = ioutil.ReadAll(r)
	if err != nil {
		return
	}

	if !pk.Encrypted {
		return pk.parsePrivateKey(pk.encryptedData)
	}

	return
}
|
||||||
|
|
||||||
|
// mod64kHash computes the 16-bit additive checksum over d used by the
// OpenPGP private key format: the byte values summed modulo 65536.
func mod64kHash(d []byte) uint16 {
	var sum uint16
	for i := 0; i < len(d); i++ {
		sum += uint16(d[i])
	}
	return sum
}
|
||||||
|
|
||||||
|
// Serialize writes pk to w as an unencrypted private key (or private subkey)
// packet: public key fields, a zero s2k-usage octet, the algorithm-specific
// secret MPIs, and a 16-bit additive checksum.
func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
	// TODO(agl): support encrypted private keys
	buf := bytes.NewBuffer(nil)
	err = pk.PublicKey.serializeWithoutHeaders(buf)
	if err != nil {
		return
	}
	buf.WriteByte(0 /* no encryption */)

	privateKeyBuf := bytes.NewBuffer(nil)

	switch priv := pk.PrivateKey.(type) {
	case *rsa.PrivateKey:
		err = serializeRSAPrivateKey(privateKeyBuf, priv)
	case *dsa.PrivateKey:
		err = serializeDSAPrivateKey(privateKeyBuf, priv)
	case *elgamal.PrivateKey:
		err = serializeElGamalPrivateKey(privateKeyBuf, priv)
	case *ecdsa.PrivateKey:
		err = serializeECDSAPrivateKey(privateKeyBuf, priv)
	default:
		err = errors.InvalidArgumentError("unknown private key type")
	}
	if err != nil {
		return
	}

	ptype := packetTypePrivateKey
	contents := buf.Bytes()
	privateKeyBytes := privateKeyBuf.Bytes()
	if pk.IsSubkey {
		ptype = packetTypePrivateSubkey
	}
	// +2 accounts for the trailing checksum octets.
	err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2)
	if err != nil {
		return
	}
	_, err = w.Write(contents)
	if err != nil {
		return
	}
	_, err = w.Write(privateKeyBytes)
	if err != nil {
		return
	}

	// The secret material is followed by its 16-bit additive checksum.
	checksum := mod64kHash(privateKeyBytes)
	var checksumBytes [2]byte
	checksumBytes[0] = byte(checksum >> 8)
	checksumBytes[1] = byte(checksum)
	_, err = w.Write(checksumBytes[:])

	return
}
|
||||||
|
|
||||||
|
func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error {
|
||||||
|
err := writeBig(w, priv.D)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = writeBig(w, priv.Primes[1])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = writeBig(w, priv.Primes[0])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return writeBig(w, priv.Precomputed.Qinv)
|
||||||
|
}
|
||||||
|
|
||||||
|
func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error {
|
||||||
|
return writeBig(w, priv.X)
|
||||||
|
}
|
||||||
|
|
||||||
|
func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error {
|
||||||
|
return writeBig(w, priv.X)
|
||||||
|
}
|
||||||
|
|
||||||
|
func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error {
|
||||||
|
return writeBig(w, priv.D)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decrypt decrypts an encrypted private key using a passphrase. It derives
// the cipher key via the stored s2k function, CFB-decrypts the key material,
// verifies the trailing checksum (SHA-1 or 16-bit additive, depending on the
// s2k type seen at parse time), and finally decodes the secret values.
// Calling Decrypt on an already-decrypted key is a no-op.
func (pk *PrivateKey) Decrypt(passphrase []byte) error {
	if !pk.Encrypted {
		return nil
	}

	key := make([]byte, pk.cipher.KeySize())
	pk.s2k(key, passphrase)
	block := pk.cipher.new(key)
	cfb := cipher.NewCFBDecrypter(block, pk.iv)

	data := make([]byte, len(pk.encryptedData))
	cfb.XORKeyStream(data, pk.encryptedData)

	if pk.sha1Checksum {
		// s2k type 254: a 20-byte SHA-1 hash trails the key material.
		if len(data) < sha1.Size {
			return errors.StructuralError("truncated private key data")
		}
		h := sha1.New()
		h.Write(data[:len(data)-sha1.Size])
		sum := h.Sum(nil)
		if !bytes.Equal(sum, data[len(data)-sha1.Size:]) {
			return errors.StructuralError("private key checksum failure")
		}
		data = data[:len(data)-sha1.Size]
	} else {
		// s2k type 255: a 16-bit additive checksum trails the key material.
		if len(data) < 2 {
			return errors.StructuralError("truncated private key data")
		}
		var sum uint16
		for i := 0; i < len(data)-2; i++ {
			sum += uint16(data[i])
		}
		if data[len(data)-2] != uint8(sum>>8) ||
			data[len(data)-1] != uint8(sum) {
			return errors.StructuralError("private key checksum failure")
		}
		data = data[:len(data)-2]
	}

	return pk.parsePrivateKey(data)
}
|
||||||
|
|
||||||
|
// parsePrivateKey decodes the algorithm-specific secret values from data,
// dispatching on the public key algorithm established earlier by
// PublicKey.parse. The final panic is unreachable for any algorithm that
// PublicKey.parse accepts.
func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) {
	switch pk.PublicKey.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly:
		return pk.parseRSAPrivateKey(data)
	case PubKeyAlgoDSA:
		return pk.parseDSAPrivateKey(data)
	case PubKeyAlgoElGamal:
		return pk.parseElGamalPrivateKey(data)
	case PubKeyAlgoECDSA:
		return pk.parseECDSAPrivateKey(data)
	}
	panic("impossible")
}
|
||||||
|
|
||||||
|
// parseRSAPrivateKey decodes the RSA secret MPIs (d, p, q) from data,
// validates the resulting key against the already-parsed public key,
// precomputes the CRT values, and installs the key on pk.
func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) {
	rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey)
	rsaPriv := new(rsa.PrivateKey)
	rsaPriv.PublicKey = *rsaPub

	buf := bytes.NewBuffer(data)
	d, _, err := readMPI(buf)
	if err != nil {
		return
	}
	p, _, err := readMPI(buf)
	if err != nil {
		return
	}
	q, _, err := readMPI(buf)
	if err != nil {
		return
	}

	rsaPriv.D = new(big.Int).SetBytes(d)
	rsaPriv.Primes = make([]*big.Int, 2)
	rsaPriv.Primes[0] = new(big.Int).SetBytes(p)
	rsaPriv.Primes[1] = new(big.Int).SetBytes(q)
	// Validate rejects keys whose secret values are inconsistent with the
	// public modulus/exponent.
	if err := rsaPriv.Validate(); err != nil {
		return err
	}
	rsaPriv.Precompute()
	pk.PrivateKey = rsaPriv
	pk.Encrypted = false
	pk.encryptedData = nil

	return nil
}
|
||||||
|
|
||||||
|
func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) {
|
||||||
|
dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey)
|
||||||
|
dsaPriv := new(dsa.PrivateKey)
|
||||||
|
dsaPriv.PublicKey = *dsaPub
|
||||||
|
|
||||||
|
buf := bytes.NewBuffer(data)
|
||||||
|
x, _, err := readMPI(buf)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
dsaPriv.X = new(big.Int).SetBytes(x)
|
||||||
|
pk.PrivateKey = dsaPriv
|
||||||
|
pk.Encrypted = false
|
||||||
|
pk.encryptedData = nil
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) {
|
||||||
|
pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey)
|
||||||
|
priv := new(elgamal.PrivateKey)
|
||||||
|
priv.PublicKey = *pub
|
||||||
|
|
||||||
|
buf := bytes.NewBuffer(data)
|
||||||
|
x, _, err := readMPI(buf)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
priv.X = new(big.Int).SetBytes(x)
|
||||||
|
pk.PrivateKey = priv
|
||||||
|
pk.Encrypted = false
|
||||||
|
pk.encryptedData = nil
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) {
|
||||||
|
ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey)
|
||||||
|
|
||||||
|
buf := bytes.NewBuffer(data)
|
||||||
|
d, _, err := readMPI(buf)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
pk.PrivateKey = &ecdsa.PrivateKey{
|
||||||
|
PublicKey: *ecdsaPub,
|
||||||
|
D: new(big.Int).SetBytes(d),
|
||||||
|
}
|
||||||
|
pk.Encrypted = false
|
||||||
|
pk.encryptedData = nil
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
748
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/public_key.go
generated
vendored
Normal file
748
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/public_key.go
generated
vendored
Normal file
|
@ -0,0 +1,748 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package packet
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto"
|
||||||
|
"crypto/dsa"
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"crypto/elliptic"
|
||||||
|
"crypto/rsa"
|
||||||
|
"crypto/sha1"
|
||||||
|
_ "crypto/sha256"
|
||||||
|
_ "crypto/sha512"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"io"
|
||||||
|
"math/big"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/crypto/openpgp/elgamal"
|
||||||
|
"golang.org/x/crypto/openpgp/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DER-encoded OIDs identifying the NIST curves used by RFC 6637 ECDSA/ECDH
// keys; compared byte-wise against the OID read from the key packet.
var (
	// NIST curve P-256
	oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}
	// NIST curve P-384
	oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22}
	// NIST curve P-521
	oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23}
)

// maxOIDLength bounds the curve OIDs accepted by parseOID; the longest OID
// recognized above is 8 bytes.
const maxOIDLength = 8
|
||||||
|
// ecdsaKey stores the algorithm-specific fields for ECDSA keys.
// as defined in RFC 6637, Section 9.
type ecdsaKey struct {
	// oid contains the OID byte sequence identifying the elliptic curve used
	oid []byte
	// p contains the elliptic curve point that represents the public key
	p parsedMPI
}
|
||||||
|
|
||||||
|
// parseOID reads the OID for the curve as defined in RFC 6637, Section 9.
|
||||||
|
func parseOID(r io.Reader) (oid []byte, err error) {
|
||||||
|
buf := make([]byte, maxOIDLength)
|
||||||
|
if _, err = readFull(r, buf[:1]); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
oidLen := buf[0]
|
||||||
|
if int(oidLen) > len(buf) {
|
||||||
|
err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen)))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
oid = buf[:oidLen]
|
||||||
|
_, err = readFull(r, oid)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *ecdsaKey) parse(r io.Reader) (err error) {
|
||||||
|
if f.oid, err = parseOID(r); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
f.p.bytes, f.p.bitLength, err = readMPI(r)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *ecdsaKey) serialize(w io.Writer) (err error) {
|
||||||
|
buf := make([]byte, maxOIDLength+1)
|
||||||
|
buf[0] = byte(len(f.oid))
|
||||||
|
copy(buf[1:], f.oid)
|
||||||
|
if _, err = w.Write(buf[:len(f.oid)+1]); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return writeMPIs(w, f.p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) {
|
||||||
|
var c elliptic.Curve
|
||||||
|
if bytes.Equal(f.oid, oidCurveP256) {
|
||||||
|
c = elliptic.P256()
|
||||||
|
} else if bytes.Equal(f.oid, oidCurveP384) {
|
||||||
|
c = elliptic.P384()
|
||||||
|
} else if bytes.Equal(f.oid, oidCurveP521) {
|
||||||
|
c = elliptic.P521()
|
||||||
|
} else {
|
||||||
|
return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid))
|
||||||
|
}
|
||||||
|
x, y := elliptic.Unmarshal(c, f.p.bytes)
|
||||||
|
if x == nil {
|
||||||
|
return nil, errors.UnsupportedError("failed to parse EC point")
|
||||||
|
}
|
||||||
|
return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// byteLen returns the serialized size of the key fields: one OID-length
// byte, the OID, the two-byte MPI header, and the point bytes.
func (f *ecdsaKey) byteLen() int {
	return 1 + len(f.oid) + 2 + len(f.p.bytes)
}
|
||||||
|
|
||||||
|
// kdfHashFunction identifies the hash used by the ECDH KDF (RFC 6637).
type kdfHashFunction byte

// kdfAlgorithm identifies the key-wrap algorithm used by the ECDH KDF.
type kdfAlgorithm byte

// ecdhKdf stores key derivation function parameters
// used for ECDH encryption. See RFC 6637, Section 9.
type ecdhKdf struct {
	KdfHash kdfHashFunction
	KdfAlgo kdfAlgorithm
}
|
||||||
|
|
||||||
|
// parse reads the KDF parameter field from r: a one-byte length (at least 3),
// a reserved octet that must be 0x01, the hash id and the algorithm id.
// Any extra bytes within the declared length are read and ignored.
// See RFC 6637, Section 9.
func (f *ecdhKdf) parse(r io.Reader) (err error) {
	buf := make([]byte, 1)
	if _, err = readFull(r, buf); err != nil {
		return
	}
	kdfLen := int(buf[0])
	if kdfLen < 3 {
		return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen))
	}
	buf = make([]byte, kdfLen)
	if _, err = readFull(r, buf); err != nil {
		return
	}
	reserved := int(buf[0])
	f.KdfHash = kdfHashFunction(buf[1])
	f.KdfAlgo = kdfAlgorithm(buf[2])
	if reserved != 0x01 {
		return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved))
	}
	return
}
|
||||||
|
|
||||||
|
// serialize writes the KDF parameters to w in the fixed four-octet wire
// form (length, reserved, hash ID, algorithm ID). See RFC 6637, Section 9.
func (f *ecdhKdf) serialize(w io.Writer) (err error) {
	buf := make([]byte, 4)
	// See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys.
	buf[0] = byte(0x03) // Length of the following fields
	buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now
	buf[2] = byte(f.KdfHash)
	buf[3] = byte(f.KdfAlgo)
	_, err = w.Write(buf[:])
	return
}
|
||||||
|
|
||||||
|
// byteLen returns the serialized size of the KDF parameters: the length
// octet plus the three fixed fields written by serialize.
func (f *ecdhKdf) byteLen() int {
	return 4
}
|
||||||
|
|
||||||
|
// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2.
type PublicKey struct {
	CreationTime time.Time
	PubKeyAlgo   PublicKeyAlgorithm
	PublicKey    interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey
	Fingerprint  [20]byte
	KeyId        uint64
	IsSubkey     bool

	// Raw algorithm-specific MPIs, retained so the key can be
	// reserialized exactly as it was read.
	n, e, p, q, g, y parsedMPI

	// RFC 6637 fields
	ec   *ecdsaKey
	ecdh *ecdhKdf
}
|
||||||
|
|
||||||
|
// signingKey provides a convenient abstraction over signature verification
// for v3 and v4 public keys.
type signingKey interface {
	// SerializeSignaturePrefix writes the 0x99-tagged length prefix
	// used when hashing the key for signatures.
	SerializeSignaturePrefix(io.Writer)
	// serializeWithoutHeaders writes the key material without the
	// packet header.
	serializeWithoutHeaders(io.Writer) error
}
|
||||||
|
|
||||||
|
func fromBig(n *big.Int) parsedMPI {
|
||||||
|
return parsedMPI{
|
||||||
|
bytes: n.Bytes(),
|
||||||
|
bitLength: uint16(n.BitLen()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey.
// The creation time is part of the v4 fingerprint, so distinct times
// produce distinct fingerprints and key IDs for the same key material.
func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey {
	pk := &PublicKey{
		CreationTime: creationTime,
		PubKeyAlgo:   PubKeyAlgoRSA,
		PublicKey:    pub,
		n:            fromBig(pub.N),
		e:            fromBig(big.NewInt(int64(pub.E))),
	}

	pk.setFingerPrintAndKeyId()
	return pk
}
|
||||||
|
|
||||||
|
// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey.
func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey {
	pk := &PublicKey{
		CreationTime: creationTime,
		PubKeyAlgo:   PubKeyAlgoDSA,
		PublicKey:    pub,
		// Keep the raw MPI forms alongside the parsed key so the
		// packet can be reserialized byte-for-byte.
		p: fromBig(pub.P),
		q: fromBig(pub.Q),
		g: fromBig(pub.G),
		y: fromBig(pub.Y),
	}

	pk.setFingerPrintAndKeyId()
	return pk
}
|
||||||
|
|
||||||
|
// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey.
func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey {
	pk := &PublicKey{
		CreationTime: creationTime,
		PubKeyAlgo:   PubKeyAlgoElGamal,
		PublicKey:    pub,
		// Keep the raw MPI forms alongside the parsed key so the
		// packet can be reserialized byte-for-byte.
		p: fromBig(pub.P),
		g: fromBig(pub.G),
		y: fromBig(pub.Y),
	}

	pk.setFingerPrintAndKeyId()
	return pk
}
|
||||||
|
|
||||||
|
// NewECDSAPublicKey returns a PublicKey that wraps the given ecdsa.PublicKey.
// It panics if the curve is not one of NIST P-256, P-384 or P-521.
func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey {
	pk := &PublicKey{
		CreationTime: creationTime,
		PubKeyAlgo:   PubKeyAlgoECDSA,
		PublicKey:    pub,
		ec:           new(ecdsaKey),
	}

	// Map the curve to its RFC 6637 OID.
	switch pub.Curve {
	case elliptic.P256():
		pk.ec.oid = oidCurveP256
	case elliptic.P384():
		pk.ec.oid = oidCurveP384
	case elliptic.P521():
		pk.ec.oid = oidCurveP521
	default:
		panic("unknown elliptic curve")
	}

	// Store the point uncompressed and record its length in bits.
	pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
	pk.ec.p.bitLength = uint16(8 * len(pk.ec.p.bytes))

	pk.setFingerPrintAndKeyId()
	return pk
}
|
||||||
|
|
||||||
|
// parse reads a v4 public key packet body from r. On success
// pk.PublicKey holds the decoded key and the fingerprint/key ID are
// populated. See RFC 4880, section 5.5.2.
func (pk *PublicKey) parse(r io.Reader) (err error) {
	// RFC 4880, section 5.5.2
	var buf [6]byte
	_, err = readFull(r, buf[:])
	if err != nil {
		return
	}
	if buf[0] != 4 {
		return errors.UnsupportedError("public key version")
	}
	// Bytes 1-4 hold the big-endian creation time in seconds since the epoch.
	pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0)
	pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5])
	switch pk.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
		err = pk.parseRSA(r)
	case PubKeyAlgoDSA:
		err = pk.parseDSA(r)
	case PubKeyAlgoElGamal:
		err = pk.parseElGamal(r)
	case PubKeyAlgoECDSA:
		pk.ec = new(ecdsaKey)
		if err = pk.ec.parse(r); err != nil {
			return err
		}
		pk.PublicKey, err = pk.ec.newECDSA()
	case PubKeyAlgoECDH:
		// ECDH keys share the ECDSA wire form, followed by KDF parameters.
		pk.ec = new(ecdsaKey)
		if err = pk.ec.parse(r); err != nil {
			return
		}
		pk.ecdh = new(ecdhKdf)
		if err = pk.ecdh.parse(r); err != nil {
			return
		}
		// The ECDH key is stored in an ecdsa.PublicKey for convenience.
		pk.PublicKey, err = pk.ec.newECDSA()
	default:
		err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
	}
	if err != nil {
		return
	}

	pk.setFingerPrintAndKeyId()
	return
}
|
||||||
|
|
||||||
|
// setFingerPrintAndKeyId computes the v4 fingerprint (SHA-1 over the
// signature prefix and key material) and the key ID (its low 8 bytes).
func (pk *PublicKey) setFingerPrintAndKeyId() {
	// RFC 4880, section 12.2
	fingerPrint := sha1.New()
	pk.SerializeSignaturePrefix(fingerPrint)
	// Writes to a hash.Hash never fail, so the error is ignored here.
	pk.serializeWithoutHeaders(fingerPrint)
	copy(pk.Fingerprint[:], fingerPrint.Sum(nil))
	pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20])
}
|
||||||
|
|
||||||
|
// parseRSA parses RSA public key material from the given Reader. See RFC 4880,
|
||||||
|
// section 5.5.2.
|
||||||
|
func (pk *PublicKey) parseRSA(r io.Reader) (err error) {
|
||||||
|
pk.n.bytes, pk.n.bitLength, err = readMPI(r)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
pk.e.bytes, pk.e.bitLength, err = readMPI(r)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(pk.e.bytes) > 3 {
|
||||||
|
err = errors.UnsupportedError("large public exponent")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
rsa := &rsa.PublicKey{
|
||||||
|
N: new(big.Int).SetBytes(pk.n.bytes),
|
||||||
|
E: 0,
|
||||||
|
}
|
||||||
|
for i := 0; i < len(pk.e.bytes); i++ {
|
||||||
|
rsa.E <<= 8
|
||||||
|
rsa.E |= int(pk.e.bytes[i])
|
||||||
|
}
|
||||||
|
pk.PublicKey = rsa
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseDSA parses DSA public key material from the given Reader. See RFC 4880,
|
||||||
|
// section 5.5.2.
|
||||||
|
func (pk *PublicKey) parseDSA(r io.Reader) (err error) {
|
||||||
|
pk.p.bytes, pk.p.bitLength, err = readMPI(r)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
pk.q.bytes, pk.q.bitLength, err = readMPI(r)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
pk.g.bytes, pk.g.bitLength, err = readMPI(r)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
pk.y.bytes, pk.y.bitLength, err = readMPI(r)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
dsa := new(dsa.PublicKey)
|
||||||
|
dsa.P = new(big.Int).SetBytes(pk.p.bytes)
|
||||||
|
dsa.Q = new(big.Int).SetBytes(pk.q.bytes)
|
||||||
|
dsa.G = new(big.Int).SetBytes(pk.g.bytes)
|
||||||
|
dsa.Y = new(big.Int).SetBytes(pk.y.bytes)
|
||||||
|
pk.PublicKey = dsa
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseElGamal parses ElGamal public key material from the given Reader. See
|
||||||
|
// RFC 4880, section 5.5.2.
|
||||||
|
func (pk *PublicKey) parseElGamal(r io.Reader) (err error) {
|
||||||
|
pk.p.bytes, pk.p.bitLength, err = readMPI(r)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
pk.g.bytes, pk.g.bitLength, err = readMPI(r)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
pk.y.bytes, pk.y.bitLength, err = readMPI(r)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
elgamal := new(elgamal.PublicKey)
|
||||||
|
elgamal.P = new(big.Int).SetBytes(pk.p.bytes)
|
||||||
|
elgamal.G = new(big.Int).SetBytes(pk.g.bytes)
|
||||||
|
elgamal.Y = new(big.Int).SetBytes(pk.y.bytes)
|
||||||
|
pk.PublicKey = elgamal
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// SerializeSignaturePrefix writes the prefix for this public key to the given Writer.
// The prefix is used when calculating a signature over this public key. See
// RFC 4880, section 5.2.4.
func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) {
	var pLength uint16
	switch pk.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
		// Each MPI costs a two-octet bit count plus its bytes.
		pLength += 2 + uint16(len(pk.n.bytes))
		pLength += 2 + uint16(len(pk.e.bytes))
	case PubKeyAlgoDSA:
		pLength += 2 + uint16(len(pk.p.bytes))
		pLength += 2 + uint16(len(pk.q.bytes))
		pLength += 2 + uint16(len(pk.g.bytes))
		pLength += 2 + uint16(len(pk.y.bytes))
	case PubKeyAlgoElGamal:
		pLength += 2 + uint16(len(pk.p.bytes))
		pLength += 2 + uint16(len(pk.g.bytes))
		pLength += 2 + uint16(len(pk.y.bytes))
	case PubKeyAlgoECDSA:
		pLength += uint16(pk.ec.byteLen())
	case PubKeyAlgoECDH:
		pLength += uint16(pk.ec.byteLen())
		pLength += uint16(pk.ecdh.byteLen())
	default:
		panic("unknown public key algorithm")
	}
	// Six octets for the version, creation time and algorithm fields.
	pLength += 6
	h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)})
	return
}
|
||||||
|
|
||||||
|
// Serialize marshals the PublicKey to w as an OpenPGP public key (or
// public subkey) packet, including the packet header. It panics on an
// unknown algorithm, mirroring SerializeSignaturePrefix.
func (pk *PublicKey) Serialize(w io.Writer) (err error) {
	length := 6 // 6 byte header

	// Compute the body length for the packet header; see
	// serializeWithoutHeaders for the matching payloads.
	switch pk.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
		length += 2 + len(pk.n.bytes)
		length += 2 + len(pk.e.bytes)
	case PubKeyAlgoDSA:
		length += 2 + len(pk.p.bytes)
		length += 2 + len(pk.q.bytes)
		length += 2 + len(pk.g.bytes)
		length += 2 + len(pk.y.bytes)
	case PubKeyAlgoElGamal:
		length += 2 + len(pk.p.bytes)
		length += 2 + len(pk.g.bytes)
		length += 2 + len(pk.y.bytes)
	case PubKeyAlgoECDSA:
		length += pk.ec.byteLen()
	case PubKeyAlgoECDH:
		length += pk.ec.byteLen()
		length += pk.ecdh.byteLen()
	default:
		panic("unknown public key algorithm")
	}

	packetType := packetTypePublicKey
	if pk.IsSubkey {
		packetType = packetTypePublicSubkey
	}
	err = serializeHeader(w, packetType, length)
	if err != nil {
		return
	}
	return pk.serializeWithoutHeaders(w)
}
|
||||||
|
|
||||||
|
// serializeWithoutHeaders marshals the PublicKey to w in the form of an
// OpenPGP public key packet, not including the packet header.
func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) {
	var buf [6]byte
	// Version 4.
	buf[0] = 4
	// Big-endian creation time.
	t := uint32(pk.CreationTime.Unix())
	buf[1] = byte(t >> 24)
	buf[2] = byte(t >> 16)
	buf[3] = byte(t >> 8)
	buf[4] = byte(t)
	// Algorithm octet.
	buf[5] = byte(pk.PubKeyAlgo)

	_, err = w.Write(buf[:])
	if err != nil {
		return
	}

	switch pk.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
		return writeMPIs(w, pk.n, pk.e)
	case PubKeyAlgoDSA:
		return writeMPIs(w, pk.p, pk.q, pk.g, pk.y)
	case PubKeyAlgoElGamal:
		return writeMPIs(w, pk.p, pk.g, pk.y)
	case PubKeyAlgoECDSA:
		return pk.ec.serialize(w)
	case PubKeyAlgoECDH:
		if err = pk.ec.serialize(w); err != nil {
			return
		}
		return pk.ecdh.serialize(w)
	}
	return errors.InvalidArgumentError("bad public-key algorithm")
}
|
||||||
|
|
||||||
|
// CanSign returns true iff this public key can generate signatures
|
||||||
|
func (pk *PublicKey) CanSign() bool {
|
||||||
|
return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifySignature returns nil iff sig is a valid signature, made by this
// public key, of the data hashed into signed. signed is mutated by this call.
func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) {
	if !pk.CanSign() {
		return errors.InvalidArgumentError("public key cannot generate signatures")
	}

	signed.Write(sig.HashSuffix)
	hashBytes := signed.Sum(nil)

	// Quick rejection: the first two hash octets must match the
	// signature's stored hash tag.
	if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
		return errors.SignatureError("hash tag doesn't match")
	}

	if pk.PubKeyAlgo != sig.PubKeyAlgo {
		return errors.InvalidArgumentError("public key and signature use different algorithms")
	}

	switch pk.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
		rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey)
		err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes)
		if err != nil {
			return errors.SignatureError("RSA verification failure")
		}
		return nil
	case PubKeyAlgoDSA:
		dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey)
		// Need to truncate hashBytes to match FIPS 186-3 section 4.6.
		subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8
		if len(hashBytes) > subgroupSize {
			hashBytes = hashBytes[:subgroupSize]
		}
		if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) {
			return errors.SignatureError("DSA verification failure")
		}
		return nil
	case PubKeyAlgoECDSA:
		ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey)
		if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) {
			return errors.SignatureError("ECDSA verification failure")
		}
		return nil
	default:
		return errors.SignatureError("Unsupported public key algorithm used in signature")
	}
}
|
||||||
|
|
||||||
|
// VerifySignatureV3 returns nil iff sig is a valid signature, made by this
// public key, of the data hashed into signed. signed is mutated by this call.
func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) {
	if !pk.CanSign() {
		return errors.InvalidArgumentError("public key cannot generate signatures")
	}

	// V3 signatures hash a five-octet suffix: type octet plus
	// big-endian creation time.
	suffix := make([]byte, 5)
	suffix[0] = byte(sig.SigType)
	binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix()))
	signed.Write(suffix)
	hashBytes := signed.Sum(nil)

	// Quick rejection via the stored two-octet hash tag.
	if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
		return errors.SignatureError("hash tag doesn't match")
	}

	if pk.PubKeyAlgo != sig.PubKeyAlgo {
		return errors.InvalidArgumentError("public key and signature use different algorithms")
	}

	switch pk.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
		rsaPublicKey := pk.PublicKey.(*rsa.PublicKey)
		if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil {
			return errors.SignatureError("RSA verification failure")
		}
		return
	case PubKeyAlgoDSA:
		dsaPublicKey := pk.PublicKey.(*dsa.PublicKey)
		// Need to truncate hashBytes to match FIPS 186-3 section 4.6.
		subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8
		if len(hashBytes) > subgroupSize {
			hashBytes = hashBytes[:subgroupSize]
		}
		if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) {
			return errors.SignatureError("DSA verification failure")
		}
		return nil
	default:
		// SignatureV3 parsing only admits RSA and DSA, so this branch
		// is unreachable for well-formed input.
		panic("shouldn't happen")
	}
}
|
||||||
|
|
||||||
|
// keySignatureHash returns a Hash of the message that needs to be signed for
// pk to assert a subkey relationship to signed.
func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
	if !hashFunc.Available() {
		return nil, errors.UnsupportedError("hash function")
	}
	h = hashFunc.New()

	// RFC 4880, section 5.2.4
	// Hash writes cannot fail, so serializeWithoutHeaders errors are ignored.
	pk.SerializeSignaturePrefix(h)
	pk.serializeWithoutHeaders(h)
	signed.SerializeSignaturePrefix(h)
	signed.serializeWithoutHeaders(h)
	return
}
|
||||||
|
|
||||||
|
// VerifyKeySignature returns nil iff sig is a valid signature, made by this
// public key, of signed.
func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error {
	h, err := keySignatureHash(pk, signed, sig.Hash)
	if err != nil {
		return err
	}
	if err = pk.VerifySignature(h, sig); err != nil {
		return err
	}

	if sig.FlagSign {
		// Signing subkeys must be cross-signed. See
		// https://www.gnupg.org/faq/subkey-cross-certify.html.
		if sig.EmbeddedSignature == nil {
			return errors.StructuralError("signing subkey is missing cross-signature")
		}
		// Verify the cross-signature. This is calculated over the same
		// data as the main signature, so we cannot just recursively
		// call signed.VerifyKeySignature(...)
		if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil {
			return errors.StructuralError("error while hashing for cross-signature: " + err.Error())
		}
		if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil {
			return errors.StructuralError("error while verifying cross-signature: " + err.Error())
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// keyRevocationHash returns a Hash of the message that needs to be
// signed for a revocation of pk: the key's signature prefix and material
// alone. See RFC 4880, section 5.2.4.
func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
	if !hashFunc.Available() {
		return nil, errors.UnsupportedError("hash function")
	}
	h = hashFunc.New()

	// RFC 4880, section 5.2.4
	pk.SerializeSignaturePrefix(h)
	pk.serializeWithoutHeaders(h)

	return
}
|
||||||
|
|
||||||
|
// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this
// public key.
func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) {
	h, err := keyRevocationHash(pk, sig.Hash)
	if err != nil {
		return err
	}
	return pk.VerifySignature(h, sig)
}
|
||||||
|
|
||||||
|
// userIdSignatureHash returns a Hash of the message that needs to be signed
|
||||||
|
// to assert that pk is a valid key for id.
|
||||||
|
func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
|
||||||
|
if !hashFunc.Available() {
|
||||||
|
return nil, errors.UnsupportedError("hash function")
|
||||||
|
}
|
||||||
|
h = hashFunc.New()
|
||||||
|
|
||||||
|
// RFC 4880, section 5.2.4
|
||||||
|
pk.SerializeSignaturePrefix(h)
|
||||||
|
pk.serializeWithoutHeaders(h)
|
||||||
|
|
||||||
|
var buf [5]byte
|
||||||
|
buf[0] = 0xb4
|
||||||
|
buf[1] = byte(len(id) >> 24)
|
||||||
|
buf[2] = byte(len(id) >> 16)
|
||||||
|
buf[3] = byte(len(id) >> 8)
|
||||||
|
buf[4] = byte(len(id))
|
||||||
|
h.Write(buf[:])
|
||||||
|
h.Write([]byte(id))
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this
// public key, that id is the identity of pub.
func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) {
	// The hash covers the certified key (pub) and the user ID.
	h, err := userIdSignatureHash(id, pub, sig.Hash)
	if err != nil {
		return err
	}
	return pk.VerifySignature(h, sig)
}
|
||||||
|
|
||||||
|
// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this
// public key, that id is the identity of pub.
func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) {
	// The hash covers the certified key (pub) and the user ID.
	h, err := userIdSignatureV3Hash(id, pub, sig.Hash)
	if err != nil {
		return err
	}
	return pk.VerifySignatureV3(h, sig)
}
|
||||||
|
|
||||||
|
// KeyIdString returns the public key's fingerprint in capital hex
// (e.g. "6C7EE1B8621CC013").
func (pk *PublicKey) KeyIdString() string {
	// The key ID is the low 8 bytes of the fingerprint; formatting the
	// byte slice always yields 16 hex digits.
	return fmt.Sprintf("%X", pk.Fingerprint[12:20])
}
|
||||||
|
|
||||||
|
// KeyIdShortString returns the short form of public key's fingerprint
// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
func (pk *PublicKey) KeyIdShortString() string {
	return fmt.Sprintf("%X", pk.Fingerprint[16:20])
}
|
||||||
|
|
||||||
|
// A parsedMPI is used to store the contents of a big integer, along with the
// bit length that was specified in the original input. This allows the MPI to
// be reserialized exactly.
type parsedMPI struct {
	bytes     []byte // big-endian magnitude
	bitLength uint16 // bit count as declared on the wire
}
|
||||||
|
|
||||||
|
// writeMPIs is a utility function for serializing several big integers to the
|
||||||
|
// given Writer.
|
||||||
|
func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) {
|
||||||
|
for _, mpi := range mpis {
|
||||||
|
err = writeMPI(w, mpi.bitLength, mpi.bytes)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// BitLength returns the bit length for the given public key. Only RSA,
// DSA and ElGamal keys are supported; other algorithms yield an
// InvalidArgumentError.
func (pk *PublicKey) BitLength() (bitLength uint16, err error) {
	switch pk.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
		bitLength = pk.n.bitLength
	case PubKeyAlgoDSA:
		bitLength = pk.p.bitLength
	case PubKeyAlgoElGamal:
		bitLength = pk.p.bitLength
	default:
		err = errors.InvalidArgumentError("bad public-key algorithm")
	}
	return
}
|
279
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go
generated
vendored
Normal file
279
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go
generated
vendored
Normal file
|
@ -0,0 +1,279 @@
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package packet
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto"
|
||||||
|
"crypto/md5"
|
||||||
|
"crypto/rsa"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"io"
|
||||||
|
"math/big"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/crypto/openpgp/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and
// should not be used for signing or encrypting. They are supported here only for
// parsing version 3 key material and validating signatures.
// See RFC 4880, section 5.5.2.
type PublicKeyV3 struct {
	CreationTime time.Time
	DaysToExpire uint16
	PubKeyAlgo   PublicKeyAlgorithm
	PublicKey    *rsa.PublicKey
	Fingerprint  [16]byte // MD5 over the raw modulus and exponent bytes
	KeyId        uint64
	IsSubkey     bool

	// Raw MPIs, retained so the key can be reserialized exactly.
	n, e parsedMPI
}
|
||||||
|
|
||||||
|
// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey.
|
||||||
|
// Included here for testing purposes only. RFC 4880, section 5.5.2:
|
||||||
|
// "an implementation MUST NOT generate a V3 key, but MAY accept it."
|
||||||
|
func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 {
|
||||||
|
pk := &PublicKeyV3{
|
||||||
|
CreationTime: creationTime,
|
||||||
|
PublicKey: pub,
|
||||||
|
n: fromBig(pub.N),
|
||||||
|
e: fromBig(big.NewInt(int64(pub.E))),
|
||||||
|
}
|
||||||
|
|
||||||
|
pk.setFingerPrintAndKeyId()
|
||||||
|
return pk
|
||||||
|
}
|
||||||
|
|
||||||
|
// parse reads a version 2 or 3 public key packet body from r. Only RSA
// variants are accepted. See RFC 4880, section 5.5.2.
func (pk *PublicKeyV3) parse(r io.Reader) (err error) {
	// RFC 4880, section 5.5.2
	var buf [8]byte
	if _, err = readFull(r, buf[:]); err != nil {
		return
	}
	if buf[0] < 2 || buf[0] > 3 {
		return errors.UnsupportedError("public key version")
	}
	// Bytes 1-4 hold the big-endian creation time in seconds since the epoch.
	pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0)
	pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7])
	pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7])
	switch pk.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
		err = pk.parseRSA(r)
	default:
		err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
	}
	if err != nil {
		return
	}

	pk.setFingerPrintAndKeyId()
	return
}
|
||||||
|
|
||||||
|
// setFingerPrintAndKeyId computes the v3 fingerprint (MD5 over the raw
// modulus and exponent bytes) and the key ID (low 8 bytes of the modulus).
func (pk *PublicKeyV3) setFingerPrintAndKeyId() {
	// RFC 4880, section 12.2
	fingerPrint := md5.New()
	fingerPrint.Write(pk.n.bytes)
	fingerPrint.Write(pk.e.bytes)
	fingerPrint.Sum(pk.Fingerprint[:0])
	// parseRSA rejects moduli shorter than 8 bytes; callers building a
	// key by hand must uphold that invariant or this slice panics.
	pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:])
}
|
||||||
|
|
||||||
|
// parseRSA parses RSA public key material from the given Reader. See RFC 4880,
// section 5.5.2.
func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) {
	if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil {
		return
	}
	if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil {
		return
	}

	// RFC 4880 Section 12.2 requires the low 8 bytes of the
	// modulus to form the key id.
	if len(pk.n.bytes) < 8 {
		return errors.StructuralError("v3 public key modulus is too short")
	}
	// The exponent must fit in an int; reject anything wider than 24 bits.
	if len(pk.e.bytes) > 3 {
		err = errors.UnsupportedError("large public exponent")
		return
	}
	rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)}
	// Accumulate the big-endian exponent bytes into an int.
	for i := 0; i < len(pk.e.bytes); i++ {
		rsa.E <<= 8
		rsa.E |= int(pk.e.bytes[i])
	}
	pk.PublicKey = rsa
	return
}
|
||||||
|
|
||||||
|
// SerializeSignaturePrefix writes the prefix for this public key to the given Writer.
// The prefix is used when calculating a signature over this public key. See
// RFC 4880, section 5.2.4.
func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) {
	var pLength uint16
	switch pk.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
		// Each MPI costs a two-octet bit count plus its bytes.
		pLength += 2 + uint16(len(pk.n.bytes))
		pLength += 2 + uint16(len(pk.e.bytes))
	default:
		panic("unknown public key algorithm")
	}
	// Six octets of fixed header material are included in the count.
	pLength += 6
	w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)})
	return
}
|
||||||
|
|
||||||
|
// Serialize marshals the PublicKeyV3 to w as an OpenPGP public key (or
// public subkey) packet, including the packet header. It panics on an
// unknown algorithm.
func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) {
	length := 8 // 8 byte header

	switch pk.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
		length += 2 + len(pk.n.bytes)
		length += 2 + len(pk.e.bytes)
	default:
		panic("unknown public key algorithm")
	}

	packetType := packetTypePublicKey
	if pk.IsSubkey {
		packetType = packetTypePublicSubkey
	}
	if err = serializeHeader(w, packetType, length); err != nil {
		return
	}
	return pk.serializeWithoutHeaders(w)
}
|
||||||
|
|
||||||
|
// serializeWithoutHeaders marshals the PublicKey to w in the form of an
// OpenPGP public key packet, not including the packet header.
func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) {
	var buf [8]byte
	// Version 3
	buf[0] = 3
	// Creation time
	t := uint32(pk.CreationTime.Unix())
	buf[1] = byte(t >> 24)
	buf[2] = byte(t >> 16)
	buf[3] = byte(t >> 8)
	buf[4] = byte(t)
	// Days to expire
	buf[5] = byte(pk.DaysToExpire >> 8)
	buf[6] = byte(pk.DaysToExpire)
	// Public key algorithm
	buf[7] = byte(pk.PubKeyAlgo)

	if _, err = w.Write(buf[:]); err != nil {
		return
	}

	switch pk.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
		return writeMPIs(w, pk.n, pk.e)
	}
	return errors.InvalidArgumentError("bad public-key algorithm")
}
|
||||||
|
|
||||||
|
// CanSign returns true iff this public key can generate signatures
|
||||||
|
func (pk *PublicKeyV3) CanSign() bool {
|
||||||
|
return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifySignatureV3 returns nil iff sig is a valid signature, made by this
// public key, of the data hashed into signed. signed is mutated by this call.
func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) {
	if !pk.CanSign() {
		return errors.InvalidArgumentError("public key cannot generate signatures")
	}

	// V3 signatures hash a five-octet suffix: type octet plus
	// big-endian creation time.
	suffix := make([]byte, 5)
	suffix[0] = byte(sig.SigType)
	binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix()))
	signed.Write(suffix)
	hashBytes := signed.Sum(nil)

	// Quick rejection via the stored two-octet hash tag.
	if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
		return errors.SignatureError("hash tag doesn't match")
	}

	if pk.PubKeyAlgo != sig.PubKeyAlgo {
		return errors.InvalidArgumentError("public key and signature use different algorithms")
	}

	switch pk.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
		if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil {
			return errors.SignatureError("RSA verification failure")
		}
		return
	default:
		// V3 public keys only support RSA.
		panic("shouldn't happen")
	}
}
|
||||||
|
|
||||||
|
// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this
|
||||||
|
// public key, that id is the identity of pub.
|
||||||
|
func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) {
|
||||||
|
h, err := userIdSignatureV3Hash(id, pk, sig.Hash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return pk.VerifySignatureV3(h, sig)
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this
|
||||||
|
// public key, of signed.
|
||||||
|
func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) {
|
||||||
|
h, err := keySignatureHash(pk, signed, sig.Hash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return pk.VerifySignatureV3(h, sig)
|
||||||
|
}
|
||||||
|
|
||||||
|
// userIdSignatureV3Hash returns a Hash of the message that needs to be signed
|
||||||
|
// to assert that pk is a valid key for id.
|
||||||
|
func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) {
|
||||||
|
if !hfn.Available() {
|
||||||
|
return nil, errors.UnsupportedError("hash function")
|
||||||
|
}
|
||||||
|
h = hfn.New()
|
||||||
|
|
||||||
|
// RFC 4880, section 5.2.4
|
||||||
|
pk.SerializeSignaturePrefix(h)
|
||||||
|
pk.serializeWithoutHeaders(h)
|
||||||
|
|
||||||
|
h.Write([]byte(id))
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyIdString returns the public key's fingerprint in capital hex
|
||||||
|
// (e.g. "6C7EE1B8621CC013").
|
||||||
|
func (pk *PublicKeyV3) KeyIdString() string {
|
||||||
|
return fmt.Sprintf("%X", pk.KeyId)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyIdShortString returns the short form of public key's fingerprint
|
||||||
|
// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
|
||||||
|
func (pk *PublicKeyV3) KeyIdShortString() string {
|
||||||
|
return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BitLength returns the bit length for the given public key.
|
||||||
|
func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) {
|
||||||
|
switch pk.PubKeyAlgo {
|
||||||
|
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
|
||||||
|
bitLength = pk.n.bitLength
|
||||||
|
default:
|
||||||
|
err = errors.InvalidArgumentError("bad public-key algorithm")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
76
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/reader.go
generated
vendored
Normal file
76
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/reader.go
generated
vendored
Normal file
|
@ -0,0 +1,76 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package packet
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/crypto/openpgp/errors"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reader reads packets from an io.Reader and allows packets to be 'unread' so
// that they result from the next call to Next.
type Reader struct {
	// q is a LIFO stack of unread packets; Next pops from it before
	// consulting the underlying readers.
	q []Packet
	// readers is a stack of packet sources; processing a nested
	// (compressed or encrypted) packet pushes a new reader onto it.
	readers []io.Reader
}
|
||||||
|
|
||||||
|
// New io.Readers are pushed when a compressed or encrypted packet is processed
// and recursively treated as a new source of packets. However, a carefully
// crafted packet can trigger an infinite recursive sequence of packets. See
// http://mumble.net/~campbell/misc/pgp-quine
// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402
// This constant limits the number of recursive packets that may be pushed.
const maxReaders = 32
|
||||||
|
|
||||||
|
// Next returns the most recently unread Packet, or reads another packet from
|
||||||
|
// the top-most io.Reader. Unknown packet types are skipped.
|
||||||
|
func (r *Reader) Next() (p Packet, err error) {
|
||||||
|
if len(r.q) > 0 {
|
||||||
|
p = r.q[len(r.q)-1]
|
||||||
|
r.q = r.q[:len(r.q)-1]
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for len(r.readers) > 0 {
|
||||||
|
p, err = Read(r.readers[len(r.readers)-1])
|
||||||
|
if err == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err == io.EOF {
|
||||||
|
r.readers = r.readers[:len(r.readers)-1]
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, ok := err.(errors.UnknownPacketTypeError); !ok {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
// Push causes the Reader to start reading from a new io.Reader. When an EOF
|
||||||
|
// error is seen from the new io.Reader, it is popped and the Reader continues
|
||||||
|
// to read from the next most recent io.Reader. Push returns a StructuralError
|
||||||
|
// if pushing the reader would exceed the maximum recursion level, otherwise it
|
||||||
|
// returns nil.
|
||||||
|
func (r *Reader) Push(reader io.Reader) (err error) {
|
||||||
|
if len(r.readers) >= maxReaders {
|
||||||
|
return errors.StructuralError("too many layers of packets")
|
||||||
|
}
|
||||||
|
r.readers = append(r.readers, reader)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unread causes the given Packet to be returned from the next call to Next.
|
||||||
|
func (r *Reader) Unread(p Packet) {
|
||||||
|
r.q = append(r.q, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewReader(r io.Reader) *Reader {
|
||||||
|
return &Reader{
|
||||||
|
q: nil,
|
||||||
|
readers: []io.Reader{r},
|
||||||
|
}
|
||||||
|
}
|
731
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/signature.go
generated
vendored
Normal file
731
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/signature.go
generated
vendored
Normal file
|
@ -0,0 +1,731 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package packet
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto"
|
||||||
|
"crypto/dsa"
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"encoding/asn1"
|
||||||
|
"encoding/binary"
|
||||||
|
"hash"
|
||||||
|
"io"
|
||||||
|
"math/big"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/crypto/openpgp/errors"
|
||||||
|
"golang.org/x/crypto/openpgp/s2k"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// See RFC 4880, section 5.2.3.21 for details.
|
||||||
|
KeyFlagCertify = 1 << iota
|
||||||
|
KeyFlagSign
|
||||||
|
KeyFlagEncryptCommunications
|
||||||
|
KeyFlagEncryptStorage
|
||||||
|
)
|
||||||
|
|
||||||
|
// Signature represents a signature. See RFC 4880, section 5.2.
type Signature struct {
	SigType    SignatureType
	PubKeyAlgo PublicKeyAlgorithm
	Hash       crypto.Hash

	// HashSuffix is extra data that is hashed in after the signed data.
	HashSuffix []byte
	// HashTag contains the first two bytes of the hash for fast rejection
	// of bad signed data.
	HashTag      [2]byte
	CreationTime time.Time

	// Exactly one of the following groups is populated, depending on
	// PubKeyAlgo.
	RSASignature         parsedMPI
	DSASigR, DSASigS     parsedMPI
	ECDSASigR, ECDSASigS parsedMPI

	// rawSubpackets contains the unparsed subpackets, in order.
	rawSubpackets []outputSubpacket

	// The following are optional so are nil when not included in the
	// signature.

	SigLifetimeSecs, KeyLifetimeSecs                        *uint32
	PreferredSymmetric, PreferredHash, PreferredCompression []uint8
	IssuerKeyId                                             *uint64
	IsPrimaryId                                             *bool

	// FlagsValid is set if any flags were given. See RFC 4880, section
	// 5.2.3.21 for details.
	FlagsValid                                                           bool
	FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool

	// RevocationReason is set if this signature has been revoked.
	// See RFC 4880, section 5.2.3.23 for details.
	RevocationReason     *uint8
	RevocationReasonText string

	// MDC is set if this signature has a feature packet that indicates
	// support for MDC subpackets.
	MDC bool

	// EmbeddedSignature, if non-nil, is a signature of the parent key, by
	// this key. This prevents an attacker from claiming another's signing
	// subkey as their own.
	EmbeddedSignature *Signature

	// outSubpackets holds the subpackets to emit when serializing; it is
	// built by Sign (via buildSubpackets).
	outSubpackets []outputSubpacket
}
|
||||||
|
|
||||||
|
// parse reads a version 4 signature packet body from r into sig.
func (sig *Signature) parse(r io.Reader) (err error) {
	// RFC 4880, section 5.2.3
	var buf [5]byte
	_, err = readFull(r, buf[:1])
	if err != nil {
		return
	}
	// Only version 4 signature packets are handled here.
	if buf[0] != 4 {
		err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
		return
	}

	_, err = readFull(r, buf[:5])
	if err != nil {
		return
	}
	sig.SigType = SignatureType(buf[0])
	sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1])
	switch sig.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA:
	default:
		err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
		return
	}

	var ok bool
	sig.Hash, ok = s2k.HashIdToHash(buf[2])
	if !ok {
		return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
	}

	hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4])
	l := 6 + hashedSubpacketsLength
	// HashSuffix holds the first l bytes of the packet (version, type,
	// algorithms, and the hashed subpackets) plus a 6-byte trailer; both
	// parts are hashed when computing or verifying the signature.
	sig.HashSuffix = make([]byte, l+6)
	sig.HashSuffix[0] = 4
	copy(sig.HashSuffix[1:], buf[:5])
	hashedSubpackets := sig.HashSuffix[6:l]
	_, err = readFull(r, hashedSubpackets)
	if err != nil {
		return
	}
	// See RFC 4880, section 5.2.4
	trailer := sig.HashSuffix[l:]
	trailer[0] = 4
	trailer[1] = 0xff
	trailer[2] = uint8(l >> 24)
	trailer[3] = uint8(l >> 16)
	trailer[4] = uint8(l >> 8)
	trailer[5] = uint8(l)

	err = parseSignatureSubpackets(sig, hashedSubpackets, true)
	if err != nil {
		return
	}

	_, err = readFull(r, buf[:2])
	if err != nil {
		return
	}
	// Unhashed subpackets follow; they are not covered by the signature.
	unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1])
	unhashedSubpackets := make([]byte, unhashedSubpacketsLength)
	_, err = readFull(r, unhashedSubpackets)
	if err != nil {
		return
	}
	err = parseSignatureSubpackets(sig, unhashedSubpackets, false)
	if err != nil {
		return
	}

	_, err = readFull(r, sig.HashTag[:2])
	if err != nil {
		return
	}

	// Finally the algorithm-specific MPI values.
	switch sig.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
		sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r)
	case PubKeyAlgoDSA:
		sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r)
		if err == nil {
			sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r)
		}
	case PubKeyAlgoECDSA:
		sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r)
		if err == nil {
			sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r)
		}
	default:
		// The algorithm was validated above, so this cannot be reached.
		panic("unreachable")
	}
	return
}
|
||||||
|
|
||||||
|
// parseSignatureSubpackets parses subpackets of the main signature packet. See
|
||||||
|
// RFC 4880, section 5.2.3.1.
|
||||||
|
func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) {
|
||||||
|
for len(subpackets) > 0 {
|
||||||
|
subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if sig.CreationTime.IsZero() {
|
||||||
|
err = errors.StructuralError("no creation time in signature")
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// signatureSubpacketType identifies the meaning of a signature subpacket.
// Values are the subpacket type octets from RFC 4880, section 5.2.3.1.
type signatureSubpacketType uint8

const (
	creationTimeSubpacket        signatureSubpacketType = 2
	signatureExpirationSubpacket signatureSubpacketType = 3
	keyExpirationSubpacket       signatureSubpacketType = 9
	prefSymmetricAlgosSubpacket  signatureSubpacketType = 11
	issuerSubpacket              signatureSubpacketType = 16
	prefHashAlgosSubpacket       signatureSubpacketType = 21
	prefCompressionSubpacket     signatureSubpacketType = 22
	primaryUserIdSubpacket       signatureSubpacketType = 25
	keyFlagsSubpacket            signatureSubpacketType = 27
	reasonForRevocationSubpacket signatureSubpacketType = 29
	featuresSubpacket            signatureSubpacketType = 30
	embeddedSignatureSubpacket   signatureSubpacketType = 32
)
|
||||||
|
|
||||||
|
// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1.
// It decodes the variable-width length, records the raw subpacket, applies
// the recognized types to sig, and returns the remaining unparsed bytes.
func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) {
	// RFC 4880, section 5.2.3.1
	var (
		length     uint32
		packetType signatureSubpacketType
		isCritical bool
	)
	// Decode the 1-, 2- or 5-octet subpacket length.
	switch {
	case subpacket[0] < 192:
		length = uint32(subpacket[0])
		subpacket = subpacket[1:]
	case subpacket[0] < 255:
		if len(subpacket) < 2 {
			goto Truncated
		}
		length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192
		subpacket = subpacket[2:]
	default:
		if len(subpacket) < 5 {
			goto Truncated
		}
		length = uint32(subpacket[1])<<24 |
			uint32(subpacket[2])<<16 |
			uint32(subpacket[3])<<8 |
			uint32(subpacket[4])
		subpacket = subpacket[5:]
	}
	if length > uint32(len(subpacket)) {
		goto Truncated
	}
	rest = subpacket[length:]
	subpacket = subpacket[:length]
	if len(subpacket) == 0 {
		err = errors.StructuralError("zero length signature subpacket")
		return
	}
	// The first body octet carries the type (low 7 bits) and the
	// critical bit (high bit).
	packetType = signatureSubpacketType(subpacket[0] & 0x7f)
	isCritical = subpacket[0]&0x80 == 0x80
	subpacket = subpacket[1:]
	// Keep the raw subpacket so the signature can be re-serialized.
	sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket})
	switch packetType {
	case creationTimeSubpacket:
		if !isHashed {
			err = errors.StructuralError("signature creation time in non-hashed area")
			return
		}
		if len(subpacket) != 4 {
			err = errors.StructuralError("signature creation time not four bytes")
			return
		}
		t := binary.BigEndian.Uint32(subpacket)
		sig.CreationTime = time.Unix(int64(t), 0)
	case signatureExpirationSubpacket:
		// Signature expiration time, section 5.2.3.10
		if !isHashed {
			return
		}
		if len(subpacket) != 4 {
			err = errors.StructuralError("expiration subpacket with bad length")
			return
		}
		sig.SigLifetimeSecs = new(uint32)
		*sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket)
	case keyExpirationSubpacket:
		// Key expiration time, section 5.2.3.6
		if !isHashed {
			return
		}
		if len(subpacket) != 4 {
			err = errors.StructuralError("key expiration subpacket with bad length")
			return
		}
		sig.KeyLifetimeSecs = new(uint32)
		*sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket)
	case prefSymmetricAlgosSubpacket:
		// Preferred symmetric algorithms, section 5.2.3.7
		if !isHashed {
			return
		}
		sig.PreferredSymmetric = make([]byte, len(subpacket))
		copy(sig.PreferredSymmetric, subpacket)
	case issuerSubpacket:
		// Issuer, section 5.2.3.5
		if len(subpacket) != 8 {
			err = errors.StructuralError("issuer subpacket with bad length")
			return
		}
		sig.IssuerKeyId = new(uint64)
		*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket)
	case prefHashAlgosSubpacket:
		// Preferred hash algorithms, section 5.2.3.8
		if !isHashed {
			return
		}
		sig.PreferredHash = make([]byte, len(subpacket))
		copy(sig.PreferredHash, subpacket)
	case prefCompressionSubpacket:
		// Preferred compression algorithms, section 5.2.3.9
		if !isHashed {
			return
		}
		sig.PreferredCompression = make([]byte, len(subpacket))
		copy(sig.PreferredCompression, subpacket)
	case primaryUserIdSubpacket:
		// Primary User ID, section 5.2.3.19
		if !isHashed {
			return
		}
		if len(subpacket) != 1 {
			err = errors.StructuralError("primary user id subpacket with bad length")
			return
		}
		sig.IsPrimaryId = new(bool)
		if subpacket[0] > 0 {
			*sig.IsPrimaryId = true
		}
	case keyFlagsSubpacket:
		// Key flags, section 5.2.3.21
		if !isHashed {
			return
		}
		if len(subpacket) == 0 {
			err = errors.StructuralError("empty key flags subpacket")
			return
		}
		sig.FlagsValid = true
		if subpacket[0]&KeyFlagCertify != 0 {
			sig.FlagCertify = true
		}
		if subpacket[0]&KeyFlagSign != 0 {
			sig.FlagSign = true
		}
		if subpacket[0]&KeyFlagEncryptCommunications != 0 {
			sig.FlagEncryptCommunications = true
		}
		if subpacket[0]&KeyFlagEncryptStorage != 0 {
			sig.FlagEncryptStorage = true
		}
	case reasonForRevocationSubpacket:
		// Reason For Revocation, section 5.2.3.23
		if !isHashed {
			return
		}
		if len(subpacket) == 0 {
			err = errors.StructuralError("empty revocation reason subpacket")
			return
		}
		sig.RevocationReason = new(uint8)
		*sig.RevocationReason = subpacket[0]
		sig.RevocationReasonText = string(subpacket[1:])
	case featuresSubpacket:
		// Features subpacket, section 5.2.3.24 specifies a very general
		// mechanism for OpenPGP implementations to signal support for new
		// features. In practice, the subpacket is used exclusively to
		// indicate support for MDC-protected encryption.
		sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1
	case embeddedSignatureSubpacket:
		// Only usage is in signatures that cross-certify
		// signing subkeys. section 5.2.3.26 describes the
		// format, with its usage described in section 11.1
		if sig.EmbeddedSignature != nil {
			err = errors.StructuralError("Cannot have multiple embedded signatures")
			return
		}
		sig.EmbeddedSignature = new(Signature)
		// Embedded signatures are required to be v4 signatures see
		// section 12.1. However, we only parse v4 signatures in this
		// file anyway.
		if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil {
			return nil, err
		}
		if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding {
			return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType)))
		}
	default:
		// Unknown non-critical subpackets are ignored; unknown critical
		// ones must cause an error per RFC 4880.
		if isCritical {
			err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType)))
			return
		}
	}
	return

Truncated:
	err = errors.StructuralError("signature subpacket truncated")
	return
}
|
||||||
|
|
||||||
|
// subpacketLengthLength returns the length, in bytes, of an encoded length value.
func subpacketLengthLength(length int) int {
	// Mirrors the 1/2/5-octet length encoding of RFC 4880, section 4.2.2.
	switch {
	case length < 192:
		return 1
	case length < 16320:
		return 2
	default:
		return 5
	}
}
|
||||||
|
|
||||||
|
// serializeSubpacketLength marshals the given length into to.
func serializeSubpacketLength(to []byte, length int) int {
	// RFC 4880, Section 4.2.2.
	switch {
	case length < 192:
		// One-octet form.
		to[0] = byte(length)
		return 1
	case length < 16320:
		// Two-octet form, biased by 192.
		n := length - 192
		to[0] = byte(n>>8) + 192
		to[1] = byte(n)
		return 2
	default:
		// Five-octet form: 0xff marker followed by a 32-bit value.
		to[0] = 255
		to[1] = byte(length >> 24)
		to[2] = byte(length >> 16)
		to[3] = byte(length >> 8)
		to[4] = byte(length)
		return 5
	}
}
|
||||||
|
|
||||||
|
// subpacketsLength returns the serialized length, in bytes, of the given
|
||||||
|
// subpackets.
|
||||||
|
func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) {
|
||||||
|
for _, subpacket := range subpackets {
|
||||||
|
if subpacket.hashed == hashed {
|
||||||
|
length += subpacketLengthLength(len(subpacket.contents) + 1)
|
||||||
|
length += 1 // type byte
|
||||||
|
length += len(subpacket.contents)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// serializeSubpackets marshals the given subpackets into to.
|
||||||
|
func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) {
|
||||||
|
for _, subpacket := range subpackets {
|
||||||
|
if subpacket.hashed == hashed {
|
||||||
|
n := serializeSubpacketLength(to, len(subpacket.contents)+1)
|
||||||
|
to[n] = byte(subpacket.subpacketType)
|
||||||
|
to = to[1+n:]
|
||||||
|
n = copy(to, subpacket.contents)
|
||||||
|
to = to[n:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyExpired returns whether sig is a self-signature of a key that has
|
||||||
|
// expired.
|
||||||
|
func (sig *Signature) KeyExpired(currentTime time.Time) bool {
|
||||||
|
if sig.KeyLifetimeSecs == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second)
|
||||||
|
return currentTime.After(expiry)
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing.
func (sig *Signature) buildHashSuffix() (err error) {
	hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true)

	var ok bool
	// l bytes of packet header + hashed subpackets, then a 6-byte trailer
	// (see RFC 4880, section 5.2.4).
	l := 6 + hashedSubpacketsLen
	sig.HashSuffix = make([]byte, l+6)
	sig.HashSuffix[0] = 4
	sig.HashSuffix[1] = uint8(sig.SigType)
	sig.HashSuffix[2] = uint8(sig.PubKeyAlgo)
	sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash)
	if !ok {
		sig.HashSuffix = nil
		return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash)))
	}
	// Big-endian length of the hashed subpacket area.
	sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8)
	sig.HashSuffix[5] = byte(hashedSubpacketsLen)
	serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true)
	trailer := sig.HashSuffix[l:]
	trailer[0] = 4
	trailer[1] = 0xff
	// Big-endian count of the bytes hashed so far.
	trailer[2] = byte(l >> 24)
	trailer[3] = byte(l >> 16)
	trailer[4] = byte(l >> 8)
	trailer[5] = byte(l)
	return
}
|
||||||
|
|
||||||
|
func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) {
|
||||||
|
err = sig.buildHashSuffix()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
h.Write(sig.HashSuffix)
|
||||||
|
digest = h.Sum(nil)
|
||||||
|
copy(sig.HashTag[:], digest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sign signs a message with a private key. The hash, h, must contain
|
||||||
|
// the hash of the message to be signed and will be mutated by this function.
|
||||||
|
// On success, the signature is stored in sig. Call Serialize to write it out.
|
||||||
|
// If config is nil, sensible defaults will be used.
|
||||||
|
func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) {
|
||||||
|
sig.outSubpackets = sig.buildSubpackets()
|
||||||
|
digest, err := sig.signPrepareHash(h)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
switch priv.PubKeyAlgo {
|
||||||
|
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
|
||||||
|
// supports both *rsa.PrivateKey and crypto.Signer
|
||||||
|
sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
|
||||||
|
sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes))
|
||||||
|
case PubKeyAlgoDSA:
|
||||||
|
dsaPriv := priv.PrivateKey.(*dsa.PrivateKey)
|
||||||
|
|
||||||
|
// Need to truncate hashBytes to match FIPS 186-3 section 4.6.
|
||||||
|
subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8
|
||||||
|
if len(digest) > subgroupSize {
|
||||||
|
digest = digest[:subgroupSize]
|
||||||
|
}
|
||||||
|
r, s, err := dsa.Sign(config.Random(), dsaPriv, digest)
|
||||||
|
if err == nil {
|
||||||
|
sig.DSASigR.bytes = r.Bytes()
|
||||||
|
sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes))
|
||||||
|
sig.DSASigS.bytes = s.Bytes()
|
||||||
|
sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes))
|
||||||
|
}
|
||||||
|
case PubKeyAlgoECDSA:
|
||||||
|
var r, s *big.Int
|
||||||
|
if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok {
|
||||||
|
// direct support, avoid asn1 wrapping/unwrapping
|
||||||
|
r, s, err = ecdsa.Sign(config.Random(), pk, digest)
|
||||||
|
} else {
|
||||||
|
var b []byte
|
||||||
|
b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, nil)
|
||||||
|
if err == nil {
|
||||||
|
r, s, err = unwrapECDSASig(b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
sig.ECDSASigR = fromBig(r)
|
||||||
|
sig.ECDSASigS = fromBig(s)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo)))
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA
// signature.
func unwrapECDSASig(b []byte) (r, s *big.Int, err error) {
	sigStruct := struct {
		R, S *big.Int
	}{}
	if _, err = asn1.Unmarshal(b, &sigStruct); err != nil {
		return nil, nil, err
	}
	return sigStruct.R, sigStruct.S, nil
}
|
||||||
|
|
||||||
|
// SignUserId computes a signature from priv, asserting that pub is a valid
|
||||||
|
// key for the identity id. On success, the signature is stored in sig. Call
|
||||||
|
// Serialize to write it out.
|
||||||
|
// If config is nil, sensible defaults will be used.
|
||||||
|
func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error {
|
||||||
|
h, err := userIdSignatureHash(id, pub, sig.Hash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return sig.Sign(h, priv, config)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SignKey computes a signature from priv, asserting that pub is a subkey. On
|
||||||
|
// success, the signature is stored in sig. Call Serialize to write it out.
|
||||||
|
// If config is nil, sensible defaults will be used.
|
||||||
|
func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error {
|
||||||
|
h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return sig.Sign(h, priv, config)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
// called first.
func (sig *Signature) Serialize(w io.Writer) (err error) {
	// A parsed-but-not-re-signed signature is emitted from its raw
	// subpackets.
	if len(sig.outSubpackets) == 0 {
		sig.outSubpackets = sig.rawSubpackets
	}
	if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil {
		return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
	}

	// Each MPI is emitted with a 2-byte bit-length prefix.
	sigLength := 0
	switch sig.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
		sigLength = 2 + len(sig.RSASignature.bytes)
	case PubKeyAlgoDSA:
		sigLength = 2 + len(sig.DSASigR.bytes)
		sigLength += 2 + len(sig.DSASigS.bytes)
	case PubKeyAlgoECDSA:
		sigLength = 2 + len(sig.ECDSASigR.bytes)
		sigLength += 2 + len(sig.ECDSASigS.bytes)
	default:
		panic("impossible")
	}

	unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false)
	length := len(sig.HashSuffix) - 6 /* trailer not included */ +
		2 /* length of unhashed subpackets */ + unhashedSubpacketsLen +
		2 /* hash tag */ + sigLength
	err = serializeHeader(w, packetTypeSignature, length)
	if err != nil {
		return
	}

	// Emit the hashed portion (HashSuffix minus its 6-byte trailer).
	_, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6])
	if err != nil {
		return
	}

	// Then the unhashed subpacket area with its big-endian length prefix.
	unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen)
	unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8)
	unhashedSubpackets[1] = byte(unhashedSubpacketsLen)
	serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false)

	_, err = w.Write(unhashedSubpackets)
	if err != nil {
		return
	}
	_, err = w.Write(sig.HashTag[:])
	if err != nil {
		return
	}

	// Finally the algorithm-specific signature MPIs.
	switch sig.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
		err = writeMPIs(w, sig.RSASignature)
	case PubKeyAlgoDSA:
		err = writeMPIs(w, sig.DSASigR, sig.DSASigS)
	case PubKeyAlgoECDSA:
		err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS)
	default:
		panic("impossible")
	}
	return
}
|
||||||
|
|
||||||
|
// outputSubpacket represents a subpacket to be marshaled.
type outputSubpacket struct {
	hashed        bool // true if this subpacket is in the hashed area.
	subpacketType signatureSubpacketType
	isCritical    bool   // critical bit to set in the type octet.
	contents      []byte // subpacket body, excluding the type octet.
}
|
||||||
|
|
||||||
|
func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) {
|
||||||
|
creationTime := make([]byte, 4)
|
||||||
|
binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix()))
|
||||||
|
subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime})
|
||||||
|
|
||||||
|
if sig.IssuerKeyId != nil {
|
||||||
|
keyId := make([]byte, 8)
|
||||||
|
binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId)
|
||||||
|
subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId})
|
||||||
|
}
|
||||||
|
|
||||||
|
if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 {
|
||||||
|
sigLifetime := make([]byte, 4)
|
||||||
|
binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs)
|
||||||
|
subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Key flags may only appear in self-signatures or certification signatures.
|
||||||
|
|
||||||
|
if sig.FlagsValid {
|
||||||
|
var flags byte
|
||||||
|
if sig.FlagCertify {
|
||||||
|
flags |= KeyFlagCertify
|
||||||
|
}
|
||||||
|
if sig.FlagSign {
|
||||||
|
flags |= KeyFlagSign
|
||||||
|
}
|
||||||
|
if sig.FlagEncryptCommunications {
|
||||||
|
flags |= KeyFlagEncryptCommunications
|
||||||
|
}
|
||||||
|
if sig.FlagEncryptStorage {
|
||||||
|
flags |= KeyFlagEncryptStorage
|
||||||
|
}
|
||||||
|
subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}})
|
||||||
|
}
|
||||||
|
|
||||||
|
// The following subpackets may only appear in self-signatures
|
||||||
|
|
||||||
|
if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 {
|
||||||
|
keyLifetime := make([]byte, 4)
|
||||||
|
binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs)
|
||||||
|
subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime})
|
||||||
|
}
|
||||||
|
|
||||||
|
if sig.IsPrimaryId != nil && *sig.IsPrimaryId {
|
||||||
|
subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}})
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(sig.PreferredSymmetric) > 0 {
|
||||||
|
subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric})
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(sig.PreferredHash) > 0 {
|
||||||
|
subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash})
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(sig.PreferredCompression) > 0 {
|
||||||
|
subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression})
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
146
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go
generated
vendored
Normal file
146
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go
generated
vendored
Normal file
|
@ -0,0 +1,146 @@
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package packet
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/crypto/openpgp/errors"
|
||||||
|
"golang.org/x/crypto/openpgp/s2k"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SignatureV3 represents older version 3 signatures. These signatures are less secure
// than version 4 and should not be used to create new signatures. They are included
// here for backwards compatibility to read and validate with older key material.
// See RFC 4880, section 5.2.2.
type SignatureV3 struct {
	SigType      SignatureType      // kind of signature (binary, text, key binding, ...).
	CreationTime time.Time          // stored on the wire as big-endian seconds since the epoch.
	IssuerKeyId  uint64             // eight-octet key ID of the signer.
	PubKeyAlgo   PublicKeyAlgorithm // only RSA and DSA variants are accepted by parse.
	Hash         crypto.Hash        // hash function used over the signed material.
	HashTag      [2]byte            // left 16 bits of the signed hash value, used as a quick check.

	// Exactly one of the following is populated, depending on PubKeyAlgo.
	RSASignature     parsedMPI
	DSASigR, DSASigS parsedMPI
}
|
||||||
|
|
||||||
|
func (sig *SignatureV3) parse(r io.Reader) (err error) {
|
||||||
|
// RFC 4880, section 5.2.2
|
||||||
|
var buf [8]byte
|
||||||
|
if _, err = readFull(r, buf[:1]); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if buf[0] < 2 || buf[0] > 3 {
|
||||||
|
err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, err = readFull(r, buf[:1]); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if buf[0] != 5 {
|
||||||
|
err = errors.UnsupportedError(
|
||||||
|
"invalid hashed material length " + strconv.Itoa(int(buf[0])))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read hashed material: signature type + creation time
|
||||||
|
if _, err = readFull(r, buf[:5]); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sig.SigType = SignatureType(buf[0])
|
||||||
|
t := binary.BigEndian.Uint32(buf[1:5])
|
||||||
|
sig.CreationTime = time.Unix(int64(t), 0)
|
||||||
|
|
||||||
|
// Eight-octet Key ID of signer.
|
||||||
|
if _, err = readFull(r, buf[:8]); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:])
|
||||||
|
|
||||||
|
// Public-key and hash algorithm
|
||||||
|
if _, err = readFull(r, buf[:2]); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0])
|
||||||
|
switch sig.PubKeyAlgo {
|
||||||
|
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA:
|
||||||
|
default:
|
||||||
|
err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var ok bool
|
||||||
|
if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok {
|
||||||
|
return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Two-octet field holding left 16 bits of signed hash value.
|
||||||
|
if _, err = readFull(r, sig.HashTag[:2]); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
switch sig.PubKeyAlgo {
|
||||||
|
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
|
||||||
|
sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r)
|
||||||
|
case PubKeyAlgoDSA:
|
||||||
|
if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r)
|
||||||
|
default:
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
// called first.
//
// NOTE(review): this writes the sig type, creation time, issuer key ID,
// algorithm/hash octets, hash tag and signature MPIs only; the packet
// header and version/hashed-length octets are presumably emitted by the
// caller — confirm against the call sites.
func (sig *SignatureV3) Serialize(w io.Writer) (err error) {
	buf := make([]byte, 8) // scratch space, reused for each fixed-size field below.

	// Write the sig type and creation time
	buf[0] = byte(sig.SigType)
	binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix()))
	if _, err = w.Write(buf[:5]); err != nil {
		return
	}

	// Write the issuer long key ID
	binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId)
	if _, err = w.Write(buf[:8]); err != nil {
		return
	}

	// Write public key algorithm, hash ID, and hash value
	buf[0] = byte(sig.PubKeyAlgo)
	hashId, ok := s2k.HashToHashId(sig.Hash)
	if !ok {
		return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash))
	}
	buf[1] = hashId
	copy(buf[2:4], sig.HashTag[:])
	if _, err = w.Write(buf[:4]); err != nil {
		return
	}

	// Refuse to serialize before a signature has actually been computed.
	if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil {
		return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
	}

	// Emit the signature MPIs for whichever algorithm was used.
	switch sig.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
		err = writeMPIs(w, sig.RSASignature)
	case PubKeyAlgoDSA:
		err = writeMPIs(w, sig.DSASigR, sig.DSASigS)
	default:
		// parse only accepts RSA/DSA, so any other value is a programmer error.
		panic("impossible")
	}
	return
}
|
155
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
generated
vendored
Normal file
155
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
generated
vendored
Normal file
|
@ -0,0 +1,155 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package packet
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/cipher"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"golang.org/x/crypto/openpgp/errors"
|
||||||
|
"golang.org/x/crypto/openpgp/s2k"
|
||||||
|
)
|
||||||
|
|
||||||
|
// This is the largest session key that we'll support. Since no 512-bit cipher
// has even been seriously used, this is comfortably large.
const maxSessionKeySizeInBytes = 64

// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC
// 4880, section 5.3.
type SymmetricKeyEncrypted struct {
	CipherFunc   CipherFunction       // symmetric cipher the session key is intended for.
	s2k          func(out, in []byte) // string-to-key: derives the key-encrypting key from a passphrase.
	encryptedKey []byte               // optional encrypted session key; empty when the S2K output is the session key itself.
}

// symmetricKeyEncryptedVersion is the only packet version accepted by parse.
const symmetricKeyEncryptedVersion = 4
|
||||||
|
|
||||||
|
// parse reads a symmetric-key encrypted session key packet body from r
// into ske. See RFC 4880, section 5.3.
func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
	// RFC 4880, section 5.3.
	// Two fixed octets: version and symmetric cipher ID.
	var buf [2]byte
	if _, err := readFull(r, buf[:]); err != nil {
		return err
	}
	if buf[0] != symmetricKeyEncryptedVersion {
		return errors.UnsupportedError("SymmetricKeyEncrypted version")
	}
	ske.CipherFunc = CipherFunction(buf[1])

	// KeySize() == 0 marks an unknown/unsupported cipher ID.
	if ske.CipherFunc.KeySize() == 0 {
		return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1])))
	}

	var err error
	ske.s2k, err = s2k.Parse(r)
	if err != nil {
		return err
	}

	encryptedKey := make([]byte, maxSessionKeySizeInBytes)
	// The session key may follow. We just have to try and read to find
	// out. If it exists then we limit it to maxSessionKeySizeInBytes.
	n, err := readFull(r, encryptedKey)
	if err != nil && err != io.ErrUnexpectedEOF {
		return err
	}

	if n != 0 {
		// If we filled the whole buffer, the key could be even larger —
		// reject it rather than silently truncating.
		if n == maxSessionKeySizeInBytes {
			return errors.UnsupportedError("oversized encrypted session key")
		}
		ske.encryptedKey = encryptedKey[:n]
	}

	return nil
}
|
||||||
|
|
||||||
|
// Decrypt attempts to decrypt an encrypted session key and returns the key and
// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data
// packet.
func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) {
	// Derive the key-encrypting key from the passphrase via S2K.
	key := make([]byte, ske.CipherFunc.KeySize())
	ske.s2k(key, passphrase)

	// With no encrypted key present, the S2K output is the session key itself.
	if len(ske.encryptedKey) == 0 {
		return key, ske.CipherFunc, nil
	}

	// the IV is all zeros
	iv := make([]byte, ske.CipherFunc.blockSize())
	c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv)
	plaintextKey := make([]byte, len(ske.encryptedKey))
	c.XORKeyStream(plaintextKey, ske.encryptedKey)
	// First plaintext octet names the cipher for the session key that follows.
	cipherFunc := CipherFunction(plaintextKey[0])
	if cipherFunc.blockSize() == 0 {
		return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
	}
	plaintextKey = plaintextKey[1:]
	// Sanity check: a session key length that isn't a whole number of
	// blocks indicates a structural problem (or a wrong passphrase).
	if l := len(plaintextKey); l == 0 || l%cipherFunc.blockSize() != 0 {
		return nil, cipherFunc, errors.StructuralError("length of decrypted key not a multiple of block size")
	}

	return plaintextKey, cipherFunc, nil
}
|
||||||
|
|
||||||
|
// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The
// packet contains a random session key, encrypted by a key derived from the
// given passphrase. The session key is returned and must be passed to
// SerializeSymmetricallyEncrypted.
// If config is nil, sensible defaults will be used.
func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) {
	cipherFunc := config.Cipher()
	keySize := cipherFunc.KeySize()
	// KeySize() == 0 marks an unknown/unsupported cipher.
	if keySize == 0 {
		return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
	}

	s2kBuf := new(bytes.Buffer)
	keyEncryptingKey := make([]byte, keySize)
	// s2k.Serialize salts and stretches the passphrase, and writes the
	// resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf.
	err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()})
	if err != nil {
		return
	}
	s2kBytes := s2kBuf.Bytes()

	// Packet body: version + cipher octets, S2K descriptor, then the
	// encrypted (cipher-octet + session key) blob of keySize+1 bytes.
	packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize
	err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength)
	if err != nil {
		return
	}

	var buf [2]byte
	buf[0] = symmetricKeyEncryptedVersion
	buf[1] = byte(cipherFunc)
	_, err = w.Write(buf[:])
	if err != nil {
		return
	}
	_, err = w.Write(s2kBytes)
	if err != nil {
		return
	}

	// Generate a fresh random session key.
	sessionKey := make([]byte, keySize)
	_, err = io.ReadFull(config.Random(), sessionKey)
	if err != nil {
		return
	}
	// Encrypt cipher-octet || sessionKey under the S2K-derived key with a
	// zero IV, as the packet format prescribes.
	iv := make([]byte, cipherFunc.blockSize())
	c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv)
	encryptedCipherAndKey := make([]byte, keySize+1)
	c.XORKeyStream(encryptedCipherAndKey, buf[1:])
	c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey)
	_, err = w.Write(encryptedCipherAndKey)
	if err != nil {
		return
	}

	key = sessionKey
	return
}
|
290
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go
generated
vendored
Normal file
290
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go
generated
vendored
Normal file
|
@ -0,0 +1,290 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package packet
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/cipher"
|
||||||
|
"crypto/sha1"
|
||||||
|
"crypto/subtle"
|
||||||
|
"golang.org/x/crypto/openpgp/errors"
|
||||||
|
"hash"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The
// encrypted contents will consist of more OpenPGP packets. See RFC 4880,
// sections 5.7 and 5.13.
type SymmetricallyEncrypted struct {
	MDC      bool      // true iff this is a type 18 packet and thus has an embedded MAC.
	contents io.Reader // remaining ciphertext stream, positioned after any version octet.
	prefix   []byte    // cached OCFB prefix (blockSize+2 bytes), read lazily by Decrypt.
}

// symmetricallyEncryptedVersion is the only MDC packet version accepted by parse.
const symmetricallyEncryptedVersion = 1
|
||||||
|
|
||||||
|
func (se *SymmetricallyEncrypted) parse(r io.Reader) error {
|
||||||
|
if se.MDC {
|
||||||
|
// See RFC 4880, section 5.13.
|
||||||
|
var buf [1]byte
|
||||||
|
_, err := readFull(r, buf[:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if buf[0] != symmetricallyEncryptedVersion {
|
||||||
|
return errors.UnsupportedError("unknown SymmetricallyEncrypted version")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
se.contents = r
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decrypt returns a ReadCloser, from which the decrypted contents of the
// packet can be read. An incorrect key can, with high probability, be detected
// immediately and this will result in a KeyIncorrect error being returned.
func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) {
	keySize := c.KeySize()
	if keySize == 0 {
		return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c)))
	}
	if len(key) != keySize {
		return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length")
	}

	// Read (and cache) the OCFB prefix so that repeated Decrypt attempts
	// with different keys can reuse it rather than consuming the stream.
	if se.prefix == nil {
		se.prefix = make([]byte, c.blockSize()+2)
		_, err := readFull(se.contents, se.prefix)
		if err != nil {
			return nil, err
		}
	} else if len(se.prefix) != c.blockSize()+2 {
		return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths")
	}

	ocfbResync := OCFBResync
	if se.MDC {
		// MDC packets use a different form of OCFB mode.
		ocfbResync = OCFBNoResync
	}

	// A nil stream means the prefix's repeated bytes didn't match: wrong key.
	s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync)
	if s == nil {
		return nil, errors.ErrKeyIncorrect
	}

	plaintext := cipher.StreamReader{S: s, R: se.contents}

	if se.MDC {
		// MDC packets have an embedded hash that we need to check.
		h := sha1.New()
		h.Write(se.prefix)
		return &seMDCReader{in: plaintext, h: h}, nil
	}

	// Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser.
	return seReader{plaintext}, nil
}
|
||||||
|
|
||||||
|
// seReader wraps an io.Reader with a no-op Close method.
type seReader struct {
	in io.Reader // underlying decrypted plaintext stream.
}

// Read forwards directly to the wrapped reader.
func (ser seReader) Read(buf []byte) (int, error) {
	return ser.in.Read(buf)
}

// Close is a no-op; it exists only to satisfy io.ReadCloser.
func (ser seReader) Close() error {
	return nil
}
|
||||||
|
|
||||||
|
// mdcTrailerSize is the size of the trailing MDC packet: tag, length and hash.
const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size

// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold
// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an
// MDC packet containing a hash of the previous contents which is checked
// against the running hash. See RFC 4880, section 5.13.
type seMDCReader struct {
	in          io.Reader            // decrypted plaintext stream.
	h           hash.Hash            // running SHA-1 over everything handed to the caller.
	trailer     [mdcTrailerSize]byte // last mdcTrailerSize bytes seen; candidate MDC packet.
	scratch     [mdcTrailerSize]byte // temporary buffer for short reads.
	trailerUsed int                  // bytes of trailer filled so far during startup.
	error       bool                 // set after an unexpected EOF; subsequent Reads fail.
	eof         bool                 // set once the underlying stream is exhausted.
}
|
||||||
|
|
||||||
|
// Read returns plaintext to the caller while always retaining the most
// recent mdcTrailerSize bytes in ser.trailer, so that when EOF arrives
// the trailer holds the MDC packet for Close to verify. Everything
// released to the caller is also folded into the running hash ser.h.
func (ser *seMDCReader) Read(buf []byte) (n int, err error) {
	if ser.error {
		err = io.ErrUnexpectedEOF
		return
	}
	if ser.eof {
		err = io.EOF
		return
	}

	// If we haven't yet filled the trailer buffer then we must do that
	// first.
	for ser.trailerUsed < mdcTrailerSize {
		n, err = ser.in.Read(ser.trailer[ser.trailerUsed:])
		ser.trailerUsed += n
		if err == io.EOF {
			// A stream shorter than the trailer cannot contain an MDC packet.
			if ser.trailerUsed != mdcTrailerSize {
				n = 0
				err = io.ErrUnexpectedEOF
				ser.error = true
				return
			}
			ser.eof = true
			n = 0
			return
		}

		if err != nil {
			n = 0
			return
		}
	}

	// If it's a short read then we read into a temporary buffer and shift
	// the data into the caller's buffer.
	if len(buf) <= mdcTrailerSize {
		n, err = readFull(ser.in, ser.scratch[:len(buf)])
		copy(buf, ser.trailer[:n])
		ser.h.Write(buf[:n])
		// Slide the trailer left by n and append the freshly read bytes.
		copy(ser.trailer[:], ser.trailer[n:])
		copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:])
		if n < len(buf) {
			ser.eof = true
			err = io.EOF
		}
		return
	}

	// Large read: reserve the first mdcTrailerSize bytes of buf for the
	// old trailer, read fresh data after them, then re-capture the new
	// last mdcTrailerSize bytes as the trailer.
	n, err = ser.in.Read(buf[mdcTrailerSize:])
	copy(buf, ser.trailer[:])
	ser.h.Write(buf[:n])
	copy(ser.trailer[:], buf[n:])

	if err == io.EOF {
		ser.eof = true
	}
	return
}
|
||||||
|
|
||||||
|
// This is a new-format packet tag byte for a type 19 (MDC) packet.
const mdcPacketTagByte = byte(0x80) | 0x40 | 19

// Close drains any unread plaintext, then verifies that the retained
// trailer is a well-formed MDC packet whose SHA-1 matches the running
// hash. A mismatch (or a truncated stream) yields a SignatureError.
func (ser *seMDCReader) Close() error {
	if ser.error {
		return errors.SignatureError("error during reading")
	}

	for !ser.eof {
		// We haven't seen EOF so we need to read to the end
		var buf [1024]byte
		_, err := ser.Read(buf[:])
		if err == io.EOF {
			break
		}
		if err != nil {
			return errors.SignatureError("error during reading")
		}
	}

	// The trailer must be exactly an MDC packet: tag, SHA-1 length, digest.
	if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size {
		return errors.SignatureError("MDC packet not found")
	}
	// The MDC hash covers its own tag and length octets as well.
	ser.h.Write(ser.trailer[:2])

	final := ser.h.Sum(nil)
	// Constant-time comparison avoids leaking how many digest bytes matched.
	if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 {
		return errors.SignatureError("hash mismatch")
	}
	return nil
}
|
||||||
|
|
||||||
|
// An seMDCWriter writes through to an io.WriteCloser while maintains a running
// hash of the data written. On close, it emits an MDC packet containing the
// running hash.
type seMDCWriter struct {
	w io.WriteCloser // underlying (encrypting) stream.
	h hash.Hash      // running SHA-1 of all plaintext written so far.
}

// Write forwards buf to the underlying stream and folds it into the hash.
func (w *seMDCWriter) Write(buf []byte) (n int, err error) {
	w.h.Write(buf)
	return w.w.Write(buf)
}

// Close appends the trailing MDC packet (tag, length, SHA-1 digest) and
// then closes the underlying stream.
func (w *seMDCWriter) Close() (err error) {
	var buf [mdcTrailerSize]byte

	buf[0] = mdcPacketTagByte
	buf[1] = sha1.Size
	// The MDC hash covers its own tag and length octets.
	w.h.Write(buf[:2])
	digest := w.h.Sum(nil)
	copy(buf[2:], digest)

	_, err = w.w.Write(buf[:])
	if err != nil {
		return
	}
	return w.w.Close()
}
|
||||||
|
|
||||||
|
// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
type noOpCloser struct {
	w io.Writer // wrapped writer.
}

// Write forwards directly to the wrapped writer.
func (c noOpCloser) Write(data []byte) (n int, err error) {
	return c.w.Write(data)
}

// Close is a no-op; it exists only to satisfy io.WriteCloser.
func (c noOpCloser) Close() error {
	return nil
}
|
||||||
|
|
||||||
|
// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet
// to w and returns a WriteCloser to which the to-be-encrypted packets can be
// written.
// If config is nil, sensible defaults will be used.
func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) {
	if c.KeySize() != len(key) {
		return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length")
	}
	// Wrap w so the stream header machinery gets a WriteCloser without
	// giving it the power to close the caller's writer.
	writeCloser := noOpCloser{w}
	ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC)
	if err != nil {
		return
	}

	// MDC packets begin with a one-octet version field.
	_, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion})
	if err != nil {
		return
	}

	block := c.new(key)
	blockSize := block.BlockSize()
	iv := make([]byte, blockSize)
	_, err = config.Random().Read(iv)
	if err != nil {
		return
	}
	// MDC packets use OCFB without resynchronization.
	s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync)
	_, err = ciphertext.Write(prefix)
	if err != nil {
		return
	}
	plaintext := cipher.StreamWriter{S: s, W: ciphertext}

	// Seed the running MDC hash with the prefix material: the IV plus its
	// two repeated check bytes, mirroring what the decrypting side hashes.
	h := sha1.New()
	h.Write(iv)
	h.Write(iv[blockSize-2:])
	contents = &seMDCWriter{w: plaintext, h: h}
	return
}
|
91
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
generated
vendored
Normal file
91
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
generated
vendored
Normal file
|
@ -0,0 +1,91 @@
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package packet
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"image"
|
||||||
|
"image/jpeg"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UserAttrImageSubpacket is the subpacket type for an image attribute
// (RFC 4880, section 5.12.1).
const UserAttrImageSubpacket = 1

// UserAttribute is capable of storing other types of data about a user
// beyond name, email and a text comment. In practice, user attributes are typically used
// to store a signed thumbnail photo JPEG image of the user.
// See RFC 4880, section 5.12.
type UserAttribute struct {
	Contents []*OpaqueSubpacket // raw subpackets; image subpackets have SubType UserAttrImageSubpacket.
}
|
||||||
|
|
||||||
|
// NewUserAttributePhoto creates a user attribute packet
// containing the given images. Each image is JPEG-encoded and prefixed
// with the 16-byte version-1 image header. An error from the JPEG
// encoder aborts the whole construction.
func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) {
	uat = new(UserAttribute)
	for _, photo := range photos {
		var buf bytes.Buffer
		// RFC 4880, Section 5.12.1.
		data := []byte{
			0x10, 0x00, // Little-endian image header length (16 bytes)
			0x01, // Image header version 1
			0x01, // JPEG
			0, 0, 0, 0, // 12 reserved octets, must be all zero.
			0, 0, 0, 0,
			0, 0, 0, 0}
		if _, err = buf.Write(data); err != nil {
			return
		}
		// Append the JPEG stream right after the fixed header, using the
		// encoder's default quality options.
		if err = jpeg.Encode(&buf, photo, nil); err != nil {
			return
		}
		uat.Contents = append(uat.Contents, &OpaqueSubpacket{
			SubType:  UserAttrImageSubpacket,
			Contents: buf.Bytes()})
	}
	return
}
|
||||||
|
|
||||||
|
// NewUserAttribute creates a new user attribute packet containing the given subpackets.
func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute {
	return &UserAttribute{Contents: contents}
}

// parse reads a user attribute packet body from r, splitting it into its
// opaque subpackets.
func (uat *UserAttribute) parse(r io.Reader) (err error) {
	// RFC 4880, section 5.12 (user attribute packet).
	b, err := ioutil.ReadAll(r)
	if err != nil {
		return
	}
	uat.Contents, err = OpaqueSubpackets(b)
	return
}
|
||||||
|
|
||||||
|
// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including
// header.
func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
	// Render all subpackets into a buffer first so the total body length
	// is known for the packet header.
	var buf bytes.Buffer
	for _, sp := range uat.Contents {
		sp.Serialize(&buf)
	}
	if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil {
		return err
	}
	_, err = w.Write(buf.Bytes())
	return
}
|
||||||
|
|
||||||
|
// ImageData returns zero or more byte slices, each containing
|
||||||
|
// JPEG File Interchange Format (JFIF), for each photo in the
|
||||||
|
// the user attribute packet.
|
||||||
|
func (uat *UserAttribute) ImageData() (imageData [][]byte) {
|
||||||
|
for _, sp := range uat.Contents {
|
||||||
|
if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
|
||||||
|
imageData = append(imageData, sp.Contents[16:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
160
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/userid.go
generated
vendored
Normal file
160
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/packet/userid.go
generated
vendored
Normal file
|
@ -0,0 +1,160 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package packet
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UserId contains text that is intended to represent the name and email
// address of the key holder. See RFC 4880, section 5.11. By convention, this
// takes the form "Full Name (Comment) <email@example.com>"
type UserId struct {
	Id string // By convention, this takes the form "Full Name (Comment) <email@example.com>" which is split out in the fields below.

	// Components extracted from Id by parseUserId (see parse).
	Name, Comment, Email string
}
|
||||||
|
|
||||||
|
// hasInvalidCharacters reports whether s contains any character that is
// forbidden inside a user id component: '(', ')', '<', '>' or NUL.
func hasInvalidCharacters(s string) bool {
	for _, r := range s {
		if r == '(' || r == ')' || r == '<' || r == '>' || r == 0 {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// NewUserId returns a UserId or nil if any of the arguments contain invalid
|
||||||
|
// characters. The invalid characters are '\x00', '(', ')', '<' and '>'
|
||||||
|
func NewUserId(name, comment, email string) *UserId {
|
||||||
|
// RFC 4880 doesn't deal with the structure of userid strings; the
|
||||||
|
// name, comment and email form is just a convention. However, there's
|
||||||
|
// no convention about escaping the metacharacters and GPG just refuses
|
||||||
|
// to create user ids where, say, the name contains a '('. We mirror
|
||||||
|
// this behaviour.
|
||||||
|
|
||||||
|
if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
uid := new(UserId)
|
||||||
|
uid.Name, uid.Comment, uid.Email = name, comment, email
|
||||||
|
uid.Id = name
|
||||||
|
if len(comment) > 0 {
|
||||||
|
if len(uid.Id) > 0 {
|
||||||
|
uid.Id += " "
|
||||||
|
}
|
||||||
|
uid.Id += "("
|
||||||
|
uid.Id += comment
|
||||||
|
uid.Id += ")"
|
||||||
|
}
|
||||||
|
if len(email) > 0 {
|
||||||
|
if len(uid.Id) > 0 {
|
||||||
|
uid.Id += " "
|
||||||
|
}
|
||||||
|
uid.Id += "<"
|
||||||
|
uid.Id += email
|
||||||
|
uid.Id += ">"
|
||||||
|
}
|
||||||
|
return uid
|
||||||
|
}
|
||||||
|
|
||||||
|
// parse reads a user id packet body from r, storing the raw string in
// uid.Id and the conventionally parsed components in Name/Comment/Email.
func (uid *UserId) parse(r io.Reader) (err error) {
	// RFC 4880, section 5.11
	b, err := ioutil.ReadAll(r)
	if err != nil {
		return
	}
	uid.Id = string(b)
	uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id)
	return
}
|
||||||
|
|
||||||
|
// Serialize marshals uid to w in the form of an OpenPGP packet, including
// header.
func (uid *UserId) Serialize(w io.Writer) error {
	// The packet body is exactly the raw Id string, so its length is
	// known up front and can be written into the packet header.
	err := serializeHeader(w, packetTypeUserId, len(uid.Id))
	if err != nil {
		return err
	}
	_, err = w.Write([]byte(uid.Id))
	return err
}
|
||||||
|
|
||||||
|
// parseUserId extracts the name, comment and email from a user id string that
// is formatted as "Full Name (Comment) <email@example.com>".
//
// It is implemented as a small state machine; missing components yield empty
// strings and each extracted component is trimmed of surrounding whitespace.
func parseUserId(id string) (name, comment, email string) {
	// Half-open [start, end) byte ranges for each component.
	var n, c, e struct {
		start, end int
	}
	var state int

	// Renamed the loop variable from `rune` (which shadowed the builtin
	// type) to `r`.
	for offset, r := range id {
		switch state {
		case 0:
			// Entering name
			n.start = offset
			state = 1
			fallthrough
		case 1:
			// In name
			if r == '(' {
				state = 2
				n.end = offset
			} else if r == '<' {
				state = 5
				n.end = offset
			}
		case 2:
			// Entering comment
			c.start = offset
			state = 3
			fallthrough
		case 3:
			// In comment
			if r == ')' {
				state = 4
				c.end = offset
			}
		case 4:
			// Between comment and email
			if r == '<' {
				state = 5
			}
		case 5:
			// Entering email
			e.start = offset
			state = 6
			fallthrough
		case 6:
			// In email
			if r == '>' {
				state = 7
				e.end = offset
			}
		default:
			// After email
		}
	}
	// If the string ended mid-component, the component extends to the end.
	switch state {
	case 1:
		// ended in the name
		n.end = len(id)
	case 3:
		// ended in comment
		c.end = len(id)
	case 6:
		// ended in email
		e.end = len(id)
	}

	name = strings.TrimSpace(id[n.start:n.end])
	comment = strings.TrimSpace(id[c.start:c.end])
	email = strings.TrimSpace(id[e.start:e.end])
	return
}
|
442
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/read.go
generated
vendored
Normal file
442
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/read.go
generated
vendored
Normal file
|
@ -0,0 +1,442 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package openpgp implements high level operations on OpenPGP messages.
|
||||||
|
package openpgp // import "golang.org/x/crypto/openpgp"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto"
|
||||||
|
_ "crypto/sha256"
|
||||||
|
"hash"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"golang.org/x/crypto/openpgp/armor"
|
||||||
|
"golang.org/x/crypto/openpgp/errors"
|
||||||
|
"golang.org/x/crypto/openpgp/packet"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SignatureType is the armor type for a PGP signature.
|
||||||
|
var SignatureType = "PGP SIGNATURE"
|
||||||
|
|
||||||
|
// readArmored reads an armored block with the given type.
|
||||||
|
func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) {
|
||||||
|
block, err := armor.Decode(r)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if block.Type != expectedType {
|
||||||
|
return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
return block.Body, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MessageDetails contains the result of parsing an OpenPGP encrypted and/or
// signed message.
type MessageDetails struct {
	IsEncrypted              bool                // true if the message was encrypted.
	EncryptedToKeyIds        []uint64            // the list of recipient key ids.
	IsSymmetricallyEncrypted bool                // true if a passphrase could have decrypted the message.
	DecryptedWith            Key                 // the private key used to decrypt the message, if any.
	IsSigned                 bool                // true if the message is signed.
	SignedByKeyId            uint64              // the key id of the signer, if any.
	SignedBy                 *Key                // the key of the signer, if available.
	LiteralData              *packet.LiteralData // the metadata of the contents
	UnverifiedBody           io.Reader           // the contents of the message.

	// If IsSigned is true and SignedBy is non-zero then the signature will
	// be verified as UnverifiedBody is read. The signature cannot be
	// checked until the whole of UnverifiedBody is read so UnverifiedBody
	// must be consumed until EOF before the data can be trusted. Even if a
	// message isn't signed (or the signer is unknown) the data may contain
	// an authentication code that is only checked once UnverifiedBody has
	// been consumed. Once EOF has been seen, the following fields are
	// valid. (An authentication code failure is reported as a
	// SignatureError error when reading from UnverifiedBody.)
	SignatureError error               // nil if the signature is good.
	Signature      *packet.Signature   // the signature packet itself, if v4 (default)
	SignatureV3    *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature

	// decrypted is the ReadCloser from the SymmetricallyEncrypted packet,
	// if any; closing it (on EOF of the body) triggers the MDC check.
	decrypted io.ReadCloser
}
|
||||||
|
|
||||||
|
// A PromptFunction is used as a callback by functions that may need to decrypt
// a private key, or prompt for a passphrase. It is called with a list of
// acceptable, encrypted private keys and a boolean that indicates whether a
// passphrase is usable. It should either decrypt a private key or return a
// passphrase to try. If the decrypted private key or given passphrase isn't
// correct, the function will be called again, forever. Any error returned will
// be passed up.
//
// Returning a nil passphrase (with a nil error) skips the symmetric attempt
// for that round; the function is simply invoked again.
type PromptFunction func(keys []Key, symmetric bool) ([]byte, error)
|
||||||
|
|
||||||
|
// A keyEnvelopePair is used to store a private key with the envelope that
// contains a symmetric key, encrypted with that key.
type keyEnvelopePair struct {
	key          Key                  // candidate (possibly encrypted) private key.
	encryptedKey *packet.EncryptedKey // session key encrypted to that key.
}
|
||||||
|
|
||||||
|
// ReadMessage parses an OpenPGP message that may be signed and/or encrypted.
// The given KeyRing should contain both public keys (for signature
// verification) and, possibly encrypted, private keys for decrypting.
// If config is nil, sensible defaults will be used.
func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) {
	var p packet.Packet

	var symKeys []*packet.SymmetricKeyEncrypted
	var pubKeys []keyEnvelopePair
	var se *packet.SymmetricallyEncrypted

	packets := packet.NewReader(r)
	md = new(MessageDetails)
	md.IsEncrypted = true

	// The message, if encrypted, starts with a number of packets
	// containing an encrypted decryption key. The decryption key is either
	// encrypted to a public key, or with a passphrase. This loop
	// collects these packets.
ParsePackets:
	for {
		p, err = packets.Next()
		if err != nil {
			return nil, err
		}
		switch p := p.(type) {
		case *packet.SymmetricKeyEncrypted:
			// This packet contains the decryption key encrypted with a passphrase.
			md.IsSymmetricallyEncrypted = true
			symKeys = append(symKeys, p)
		case *packet.EncryptedKey:
			// This packet contains the decryption key encrypted to a public key.
			md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId)
			switch p.Algo {
			case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal:
				break
			default:
				// Unsupported public-key algorithm; skip this envelope.
				continue
			}
			var keys []Key
			if p.KeyId == 0 {
				// A zero key id means a "wild card" recipient: try
				// every available decryption key.
				keys = keyring.DecryptionKeys()
			} else {
				keys = keyring.KeysById(p.KeyId)
			}
			for _, k := range keys {
				pubKeys = append(pubKeys, keyEnvelopePair{k, p})
			}
		case *packet.SymmetricallyEncrypted:
			se = p
			break ParsePackets
		case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature:
			// This message isn't encrypted.
			if len(symKeys) != 0 || len(pubKeys) != 0 {
				return nil, errors.StructuralError("key material not followed by encrypted message")
			}
			packets.Unread(p)
			return readSignedMessage(packets, nil, keyring)
		}
	}

	var candidates []Key
	var decrypted io.ReadCloser

	// Now that we have the list of encrypted keys we need to decrypt at
	// least one of them or, if we cannot, we need to call the prompt
	// function so that it can decrypt a key or give us a passphrase.
FindKey:
	for {
		// See if any of the keys already have a private key available
		candidates = candidates[:0]
		candidateFingerprints := make(map[string]bool)

		for _, pk := range pubKeys {
			if pk.key.PrivateKey == nil {
				continue
			}
			if !pk.key.PrivateKey.Encrypted {
				if len(pk.encryptedKey.Key) == 0 {
					// Decrypt errors are deliberately ignored here: an
					// empty session key below just means this envelope
					// didn't work out and we move on.
					pk.encryptedKey.Decrypt(pk.key.PrivateKey, config)
				}
				if len(pk.encryptedKey.Key) == 0 {
					continue
				}
				decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key)
				if err != nil && err != errors.ErrKeyIncorrect {
					return nil, err
				}
				if decrypted != nil {
					md.DecryptedWith = pk.key
					break FindKey
				}
			} else {
				// Still-encrypted private key: collect it (deduplicated
				// by fingerprint) as a candidate for the prompt.
				fpr := string(pk.key.PublicKey.Fingerprint[:])
				if v := candidateFingerprints[fpr]; v {
					continue
				}
				candidates = append(candidates, pk.key)
				candidateFingerprints[fpr] = true
			}
		}

		if len(candidates) == 0 && len(symKeys) == 0 {
			return nil, errors.ErrKeyIncorrect
		}

		if prompt == nil {
			return nil, errors.ErrKeyIncorrect
		}

		passphrase, err := prompt(candidates, len(symKeys) != 0)
		if err != nil {
			return nil, err
		}

		// Try the symmetric passphrase first
		if len(symKeys) != 0 && passphrase != nil {
			for _, s := range symKeys {
				key, cipherFunc, err := s.Decrypt(passphrase)
				if err == nil {
					decrypted, err = se.Decrypt(cipherFunc, key)
					if err != nil && err != errors.ErrKeyIncorrect {
						return nil, err
					}
					if decrypted != nil {
						break FindKey
					}
				}

			}
		}
	}

	md.decrypted = decrypted
	if err := packets.Push(decrypted); err != nil {
		return nil, err
	}
	return readSignedMessage(packets, md, keyring)
}
|
||||||
|
|
||||||
|
// readSignedMessage reads a possibly signed message if mdin is non-zero then
// that structure is updated and returned. Otherwise a fresh MessageDetails is
// used.
func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) {
	if mdin == nil {
		mdin = new(MessageDetails)
	}
	md = mdin

	var p packet.Packet
	var h hash.Hash
	var wrappedHash hash.Hash
	// Walk packets (descending into Compressed ones) until the literal
	// data is found, recording any one-pass signature metadata on the way.
FindLiteralData:
	for {
		p, err = packets.Next()
		if err != nil {
			return nil, err
		}
		switch p := p.(type) {
		case *packet.Compressed:
			if err := packets.Push(p.Body); err != nil {
				return nil, err
			}
		case *packet.OnePassSignature:
			if !p.IsLast {
				return nil, errors.UnsupportedError("nested signatures")
			}

			h, wrappedHash, err = hashForSignature(p.Hash, p.SigType)
			if err != nil {
				md = nil
				return
			}

			md.IsSigned = true
			md.SignedByKeyId = p.KeyId
			keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign)
			if len(keys) > 0 {
				md.SignedBy = &keys[0]
			}
		case *packet.LiteralData:
			md.LiteralData = p
			break FindLiteralData
		}
	}

	// Wrap the body so that reading it to EOF performs signature
	// verification and/or the MDC check, as applicable.
	if md.SignedBy != nil {
		md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md}
	} else if md.decrypted != nil {
		md.UnverifiedBody = checkReader{md}
	} else {
		md.UnverifiedBody = md.LiteralData.Body
	}

	return md, nil
}
|
||||||
|
|
||||||
|
// hashForSignature returns a pair of hashes that can be used to verify a
|
||||||
|
// signature. The signature may specify that the contents of the signed message
|
||||||
|
// should be preprocessed (i.e. to normalize line endings). Thus this function
|
||||||
|
// returns two hashes. The second should be used to hash the message itself and
|
||||||
|
// performs any needed preprocessing.
|
||||||
|
func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) {
|
||||||
|
if !hashId.Available() {
|
||||||
|
return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId)))
|
||||||
|
}
|
||||||
|
h := hashId.New()
|
||||||
|
|
||||||
|
switch sigType {
|
||||||
|
case packet.SigTypeBinary:
|
||||||
|
return h, h, nil
|
||||||
|
case packet.SigTypeText:
|
||||||
|
return h, NewCanonicalTextHash(h), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF
// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger
// MDC checks.
type checkReader struct {
	md *MessageDetails // carries both the literal body and the decrypted closer.
}
|
||||||
|
|
||||||
|
func (cr checkReader) Read(buf []byte) (n int, err error) {
|
||||||
|
n, err = cr.md.LiteralData.Body.Read(buf)
|
||||||
|
if err == io.EOF {
|
||||||
|
mdcErr := cr.md.decrypted.Close()
|
||||||
|
if mdcErr != nil {
|
||||||
|
err = mdcErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes
// the data as it is read. When it sees an EOF from the underlying io.Reader
// it parses and checks a trailing Signature packet and triggers any MDC checks.
type signatureCheckReader struct {
	packets        *packet.Reader  // source of the trailing Signature packet.
	h, wrappedHash hash.Hash       // raw and (possibly) canonicalising hashes.
	md             *MessageDetails // receives SignatureError / Signature on EOF.
}
|
||||||
|
|
||||||
|
// Read forwards to the literal data body while feeding every byte into the
// wrapped hash. On EOF it reads the trailing Signature (or SignatureV3)
// packet, verifies it against the accumulated hash, stores the result in
// md.SignatureError, and finally closes any decrypted stream to run the MDC
// check.
func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) {
	n, err = scr.md.LiteralData.Body.Read(buf)
	scr.wrappedHash.Write(buf[:n])
	if err == io.EOF {
		var p packet.Packet
		p, scr.md.SignatureError = scr.packets.Next()
		if scr.md.SignatureError != nil {
			return
		}

		var ok bool
		if scr.md.Signature, ok = p.(*packet.Signature); ok {
			scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature)
		} else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok {
			scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3)
		} else {
			scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature")
			return
		}

		// The SymmetricallyEncrypted packet, if any, might have an
		// unsigned hash of its own. In order to check this we need to
		// close that Reader.
		if scr.md.decrypted != nil {
			mdcErr := scr.md.decrypted.Close()
			if mdcErr != nil {
				// An MDC failure overrides the io.EOF; the signature
				// result remains available in md.SignatureError.
				err = mdcErr
			}
		}
	}
	return
}
|
||||||
|
|
||||||
|
// CheckDetachedSignature takes a signed file and a detached signature and
// returns the signer if the signature is valid. If the signer isn't known,
// ErrUnknownIssuer is returned.
func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
	var issuerKeyId uint64
	var hashFunc crypto.Hash
	var sigType packet.SignatureType
	var keys []Key
	var p packet.Packet

	// Scan signature packets until one is found whose issuer we hold a
	// signing key for; EOF without a match means an unknown issuer.
	packets := packet.NewReader(signature)
	for {
		p, err = packets.Next()
		if err == io.EOF {
			return nil, errors.ErrUnknownIssuer
		}
		if err != nil {
			return nil, err
		}

		switch sig := p.(type) {
		case *packet.Signature:
			if sig.IssuerKeyId == nil {
				return nil, errors.StructuralError("signature doesn't have an issuer")
			}
			issuerKeyId = *sig.IssuerKeyId
			hashFunc = sig.Hash
			sigType = sig.SigType
		case *packet.SignatureV3:
			issuerKeyId = sig.IssuerKeyId
			hashFunc = sig.Hash
			sigType = sig.SigType
		default:
			return nil, errors.StructuralError("non signature packet found")
		}

		keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign)
		if len(keys) > 0 {
			break
		}
	}

	// The loop above only exits the for with at least one key.
	if len(keys) == 0 {
		panic("unreachable")
	}

	h, wrappedHash, err := hashForSignature(hashFunc, sigType)
	if err != nil {
		return nil, err
	}

	// Hash the entire signed input (wrappedHash applies any text
	// canonicalisation required by the signature type).
	if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF {
		return nil, err
	}

	// Try each matching key; the first one that verifies wins.
	for _, key := range keys {
		switch sig := p.(type) {
		case *packet.Signature:
			err = key.PublicKey.VerifySignature(h, sig)
		case *packet.SignatureV3:
			err = key.PublicKey.VerifySignatureV3(h, sig)
		default:
			panic("unreachable")
		}

		if err == nil {
			return key.Entity, nil
		}
	}

	return nil, err
}
|
||||||
|
|
||||||
|
// CheckArmoredDetachedSignature performs the same actions as
|
||||||
|
// CheckDetachedSignature but expects the signature to be armored.
|
||||||
|
func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
|
||||||
|
body, err := readArmored(signature, SignatureType)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
return CheckDetachedSignature(keyring, signed, body)
|
||||||
|
}
|
273
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
generated
vendored
Normal file
273
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
generated
vendored
Normal file
|
@ -0,0 +1,273 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package s2k implements the various OpenPGP string-to-key transforms as
|
||||||
|
// specified in RFC 4880 section 3.7.1.
|
||||||
|
package s2k // import "golang.org/x/crypto/openpgp/s2k"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto"
|
||||||
|
"hash"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"golang.org/x/crypto/openpgp/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Config collects configuration parameters for s2k key-stretching
// transformations. A nil *Config is valid and results in all default
// values. Currently, Config is used only by the Serialize function in
// this package.
type Config struct {
	// Hash is the default hash function to be used. If
	// nil, SHA1 is used.
	Hash crypto.Hash
	// S2KCount is only used for symmetric encryption. It
	// determines the strength of the passphrase stretching when
	// the said passphrase is hashed to produce a key. S2KCount
	// should be between 1024 and 65011712, inclusive. If Config
	// is nil or S2KCount is 0, the value 65536 is used. Not all
	// values in the above range can be represented. S2KCount will
	// be rounded up to the next representable value if it cannot
	// be encoded exactly. When set, it is strongly encouraged to
	// use a value that is at least 65536. See RFC 4880 Section
	// 3.7.1.3.
	S2KCount int
}
|
||||||
|
|
||||||
|
func (c *Config) hash() crypto.Hash {
|
||||||
|
if c == nil || uint(c.Hash) == 0 {
|
||||||
|
// SHA1 is the historical default in this package.
|
||||||
|
return crypto.SHA1
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.Hash
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodedCount returns the octet encoding of the configured S2KCount,
// clamped to the representable range. See RFC 4880 Section 3.7.1.3.
func (c *Config) encodedCount() uint8 {
	if c == nil || c.S2KCount == 0 {
		return 96 // The common case. Corresponding to 65536
	}

	i := c.S2KCount
	switch {
	// Behave like GPG. Should we make 65536 the lowest value used?
	case i < 1024:
		i = 1024
	case i > 65011712:
		i = 65011712
	}

	return encodeCount(i)
}
|
||||||
|
|
||||||
|
// encodeCount converts an iterative "count" in the range 1024 to
// 65011712, inclusive, to an encoded count. The return value is the
// octet that is actually stored in the GPG file. encodeCount panics
// if i is not in the above range (encodedCount above takes care to
// pass i in the correct range). See RFC 4880 Section 3.7.1.3.
func encodeCount(i int) uint8 {
	if i < 1024 || i > 65011712 {
		panic("count arg i outside the required range")
	}

	// decodeCount is monotonically non-decreasing in the encoded octet,
	// so the first encoding whose decoded count reaches i rounds i up to
	// the next representable value.
	for encoded := 0; encoded < 256; encoded++ {
		count := decodeCount(uint8(encoded))
		if count >= i {
			return uint8(encoded)
		}
	}

	return 255
}
|
||||||
|
|
||||||
|
// decodeCount returns the s2k mode 3 iterative "count" corresponding to
// the encoded octet c: a 4-bit mantissa scaled by a 4-bit exponent.
func decodeCount(c uint8) int {
	mantissa := 16 + int(c&15)
	exponent := uint32(c>>4) + 6
	return mantissa << exponent
}
|
||||||
|
|
||||||
|
// Simple writes to out the result of computing the Simple S2K function (RFC
// 4880, section 3.7.1.1) using the given hash and input passphrase.
func Simple(out []byte, h hash.Hash, in []byte) {
	// Simple S2K is Salted S2K with an empty salt.
	Salted(out, h, in, nil)
}
|
||||||
|
|
||||||
|
// zero holds a single zero byte, written repeatedly to "preload" the hash
// in the salted and iterated S2K functions (see Salted and Iterated).
var zero [1]byte
|
||||||
|
|
||||||
|
// Salted writes to out the result of computing the Salted S2K function (RFC
// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
func Salted(out []byte, h hash.Hash, in []byte, salt []byte) {
	done := 0
	var digest []byte

	// When out is longer than one digest, successive rounds prepend i
	// zero bytes to produce distinct digests, which are concatenated.
	for i := 0; done < len(out); i++ {
		h.Reset()
		for j := 0; j < i; j++ {
			h.Write(zero[:])
		}
		h.Write(salt)
		h.Write(in)
		digest = h.Sum(digest[:0])
		n := copy(out[done:], digest)
		done += n
	}
}
|
||||||
|
|
||||||
|
// Iterated writes to out the result of computing the Iterated and Salted S2K
// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase,
// salt and iteration count.
func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) {
	combined := make([]byte, len(in)+len(salt))
	copy(combined, salt)
	copy(combined[len(salt):], in)

	// Per the RFC, at least one full copy of salt+passphrase is always
	// hashed, even if count is smaller.
	if count < len(combined) {
		count = len(combined)
	}

	done := 0
	var digest []byte
	// As in Salted, round i prepends i zero bytes so that each round's
	// digest differs; rounds are concatenated until out is filled.
	for i := 0; done < len(out); i++ {
		h.Reset()
		for j := 0; j < i; j++ {
			h.Write(zero[:])
		}
		// Feed repeated copies of combined until exactly count bytes
		// have been hashed, truncating the final copy as needed.
		written := 0
		for written < count {
			if written+len(combined) > count {
				todo := count - written
				h.Write(combined[:todo])
				written = count
			} else {
				h.Write(combined)
				written += len(combined)
			}
		}
		digest = h.Sum(digest[:0])
		n := copy(out[done:], digest)
		done += n
	}
}
|
||||||
|
|
||||||
|
// Parse reads a binary specification for a string-to-key transformation from r
// and returns a function which performs that transform.
func Parse(r io.Reader) (f func(out, in []byte), err error) {
	var buf [9]byte

	// buf[0] is the S2K mode, buf[1] the hash algorithm id.
	_, err = io.ReadFull(r, buf[:2])
	if err != nil {
		return
	}

	hash, ok := HashIdToHash(buf[1])
	if !ok {
		return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1])))
	}
	if !hash.Available() {
		return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash)))
	}
	h := hash.New()

	// NOTE(review): the returned closures capture the single hash.Hash h
	// (and, for modes 1 and 3, the local buf array holding the salt), so
	// the returned function is presumably not safe for concurrent use —
	// confirm with callers before sharing it across goroutines.
	switch buf[0] {
	case 0:
		// Mode 0: Simple S2K — no salt, no iteration.
		f := func(out, in []byte) {
			Simple(out, h, in)
		}
		return f, nil
	case 1:
		// Mode 1: Salted S2K — 8-byte salt follows.
		_, err = io.ReadFull(r, buf[:8])
		if err != nil {
			return
		}
		f := func(out, in []byte) {
			Salted(out, h, in, buf[:8])
		}
		return f, nil
	case 3:
		// Mode 3: Iterated and Salted S2K — 8-byte salt plus an
		// encoded count octet follow.
		_, err = io.ReadFull(r, buf[:9])
		if err != nil {
			return
		}
		count := decodeCount(buf[8])
		f := func(out, in []byte) {
			Iterated(out, h, in, buf[:8], count)
		}
		return f, nil
	}

	return nil, errors.UnsupportedError("S2K function")
}
|
||||||
|
|
||||||
|
// Serialize salts and stretches the given passphrase and writes the
// resulting key into key. It also serializes an S2K descriptor to
// w. The key stretching can be configured with c, which may be
// nil. In that case, sensible defaults will be used.
func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error {
	// Descriptor layout: mode(1) | hash id(1) | salt(8) | encoded count(1).
	var buf [11]byte
	buf[0] = 3 /* iterated and salted */
	// NOTE(review): the ok result is discarded; an unmapped hash would
	// silently serialize a zero hash id — confirm c.hash() is always one
	// of the mapped algorithms.
	buf[1], _ = HashToHashId(c.hash())
	salt := buf[2:10]
	if _, err := io.ReadFull(rand, salt); err != nil {
		return err
	}
	// Round-trip through the encoding so that key stretching uses exactly
	// the count that readers will decode from the descriptor.
	encodedCount := c.encodedCount()
	count := decodeCount(encodedCount)
	buf[10] = encodedCount
	if _, err := w.Write(buf[:]); err != nil {
		return err
	}

	Iterated(key, c.hash().New(), passphrase, salt, count)
	return nil
}
|
||||||
|
|
||||||
|
// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with
// Go's crypto.Hash type. See RFC 4880, section 9.4.
// Looked up linearly by HashIdToHash, HashIdToString and HashToHashId.
var hashToHashIdMapping = []struct {
	id   byte        // OpenPGP hash algorithm id.
	hash crypto.Hash // corresponding Go hash.
	name string      // OpenPGP text name of the algorithm.
}{
	{1, crypto.MD5, "MD5"},
	{2, crypto.SHA1, "SHA1"},
	{3, crypto.RIPEMD160, "RIPEMD160"},
	{8, crypto.SHA256, "SHA256"},
	{9, crypto.SHA384, "SHA384"},
	{10, crypto.SHA512, "SHA512"},
	{11, crypto.SHA224, "SHA224"},
}
|
||||||
|
|
||||||
|
// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
|
||||||
|
// hash id.
|
||||||
|
func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
|
||||||
|
for _, m := range hashToHashIdMapping {
|
||||||
|
if m.id == id {
|
||||||
|
return m.hash, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// HashIdToString returns the name of the hash function corresponding to the
|
||||||
|
// given OpenPGP hash id.
|
||||||
|
func HashIdToString(id byte) (name string, ok bool) {
|
||||||
|
for _, m := range hashToHashIdMapping {
|
||||||
|
if m.id == id {
|
||||||
|
return m.name, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
// HashToHashId returns an OpenPGP hash id which corresponds to the given Hash.
func HashToHashId(h crypto.Hash) (id byte, ok bool) {
	for _, m := range hashToHashIdMapping {
		if m.hash == h {
			return m.id, true
		}
	}
	return 0, false
}
|
378
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/write.go
generated
vendored
Normal file
378
vendor/github.com/elastic/beats/dev-tools/vendor/golang.org/x/crypto/openpgp/write.go
generated
vendored
Normal file
|
@ -0,0 +1,378 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package openpgp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto"
|
||||||
|
"hash"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/crypto/openpgp/armor"
|
||||||
|
"golang.org/x/crypto/openpgp/errors"
|
||||||
|
"golang.org/x/crypto/openpgp/packet"
|
||||||
|
"golang.org/x/crypto/openpgp/s2k"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DetachSign signs message with the private key from signer (which must
// already have been decrypted) and writes the signature to w.
// If config is nil, sensible defaults will be used.
func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
	// Binary signature, unarmored output.
	return detachSign(w, signer, message, packet.SigTypeBinary, config)
}
|
||||||
|
|
||||||
|
// ArmoredDetachSign signs message with the private key from signer (which
// must already have been decrypted) and writes an armored signature to w.
// If config is nil, sensible defaults will be used.
func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) {
	// Binary signature, ASCII-armored output.
	return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config)
}
|
||||||
|
|
||||||
|
// DetachSignText signs message (after canonicalising the line endings) with
// the private key from signer (which must already have been decrypted) and
// writes the signature to w.
// If config is nil, sensible defaults will be used.
func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
	// Text signature, unarmored output.
	return detachSign(w, signer, message, packet.SigTypeText, config)
}
|
||||||
|
|
||||||
|
// ArmoredDetachSignText signs message (after canonicalising the line endings)
|
||||||
|
// with the private key from signer (which must already have been decrypted)
|
||||||
|
// and writes an armored signature to w.
|
||||||
|
// If config is nil, sensible defaults will be used.
|
||||||
|
func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
|
||||||
|
return armoredDetachSign(w, signer, message, packet.SigTypeText, config)
|
||||||
|
}
|
||||||
|
|
||||||
|
func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
|
||||||
|
out, err := armor.Encode(w, SignatureType, nil)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = detachSign(out, signer, message, sigType, config)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return out.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
|
||||||
|
if signer.PrivateKey == nil {
|
||||||
|
return errors.InvalidArgumentError("signing key doesn't have a private key")
|
||||||
|
}
|
||||||
|
if signer.PrivateKey.Encrypted {
|
||||||
|
return errors.InvalidArgumentError("signing key is encrypted")
|
||||||
|
}
|
||||||
|
|
||||||
|
sig := new(packet.Signature)
|
||||||
|
sig.SigType = sigType
|
||||||
|
sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo
|
||||||
|
sig.Hash = config.Hash()
|
||||||
|
sig.CreationTime = config.Now()
|
||||||
|
sig.IssuerKeyId = &signer.PrivateKey.KeyId
|
||||||
|
|
||||||
|
h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
io.Copy(wrappedHash, message)
|
||||||
|
|
||||||
|
err = sig.Sign(h, signer.PrivateKey, config)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
return sig.Serialize(w)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileHints contains metadata about encrypted files. This metadata is, itself,
|
||||||
|
// encrypted.
|
||||||
|
type FileHints struct {
|
||||||
|
// IsBinary can be set to hint that the contents are binary data.
|
||||||
|
IsBinary bool
|
||||||
|
// FileName hints at the name of the file that should be written. It's
|
||||||
|
// truncated to 255 bytes if longer. It may be empty to suggest that the
|
||||||
|
// file should not be written to disk. It may be equal to "_CONSOLE" to
|
||||||
|
// suggest the data should not be written to disk.
|
||||||
|
FileName string
|
||||||
|
// ModTime contains the modification time of the file, or the zero time if not applicable.
|
||||||
|
ModTime time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase.
|
||||||
|
// The resulting WriteCloser must be closed after the contents of the file have
|
||||||
|
// been written.
|
||||||
|
// If config is nil, sensible defaults will be used.
|
||||||
|
func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
|
||||||
|
if hints == nil {
|
||||||
|
hints = &FileHints{}
|
||||||
|
}
|
||||||
|
|
||||||
|
key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
literaldata := w
|
||||||
|
if algo := config.Compression(); algo != packet.CompressionNone {
|
||||||
|
var compConfig *packet.CompressionConfig
|
||||||
|
if config != nil {
|
||||||
|
compConfig = config.CompressionConfig
|
||||||
|
}
|
||||||
|
literaldata, err = packet.SerializeCompressed(w, algo, compConfig)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var epochSeconds uint32
|
||||||
|
if !hints.ModTime.IsZero() {
|
||||||
|
epochSeconds = uint32(hints.ModTime.Unix())
|
||||||
|
}
|
||||||
|
return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds)
|
||||||
|
}
|
||||||
|
|
||||||
|
// intersectPreferences mutates and returns a prefix of a that contains only
|
||||||
|
// the values in the intersection of a and b. The order of a is preserved.
|
||||||
|
func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) {
|
||||||
|
var j int
|
||||||
|
for _, v := range a {
|
||||||
|
for _, v2 := range b {
|
||||||
|
if v == v2 {
|
||||||
|
a[j] = v
|
||||||
|
j++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return a[:j]
|
||||||
|
}
|
||||||
|
|
||||||
|
func hashToHashId(h crypto.Hash) uint8 {
|
||||||
|
v, ok := s2k.HashToHashId(h)
|
||||||
|
if !ok {
|
||||||
|
panic("tried to convert unknown hash")
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encrypt encrypts a message to a number of recipients and, optionally, signs
|
||||||
|
// it. hints contains optional information, that is also encrypted, that aids
|
||||||
|
// the recipients in processing the message. The resulting WriteCloser must
|
||||||
|
// be closed after the contents of the file have been written.
|
||||||
|
// If config is nil, sensible defaults will be used.
|
||||||
|
func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
|
||||||
|
var signer *packet.PrivateKey
|
||||||
|
if signed != nil {
|
||||||
|
signKey, ok := signed.signingKey(config.Now())
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.InvalidArgumentError("no valid signing keys")
|
||||||
|
}
|
||||||
|
signer = signKey.PrivateKey
|
||||||
|
if signer == nil {
|
||||||
|
return nil, errors.InvalidArgumentError("no private key in signing key")
|
||||||
|
}
|
||||||
|
if signer.Encrypted {
|
||||||
|
return nil, errors.InvalidArgumentError("signing key must be decrypted")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// These are the possible ciphers that we'll use for the message.
|
||||||
|
candidateCiphers := []uint8{
|
||||||
|
uint8(packet.CipherAES128),
|
||||||
|
uint8(packet.CipherAES256),
|
||||||
|
uint8(packet.CipherCAST5),
|
||||||
|
}
|
||||||
|
// These are the possible hash functions that we'll use for the signature.
|
||||||
|
candidateHashes := []uint8{
|
||||||
|
hashToHashId(crypto.SHA256),
|
||||||
|
hashToHashId(crypto.SHA512),
|
||||||
|
hashToHashId(crypto.SHA1),
|
||||||
|
hashToHashId(crypto.RIPEMD160),
|
||||||
|
}
|
||||||
|
// In the event that a recipient doesn't specify any supported ciphers
|
||||||
|
// or hash functions, these are the ones that we assume that every
|
||||||
|
// implementation supports.
|
||||||
|
defaultCiphers := candidateCiphers[len(candidateCiphers)-1:]
|
||||||
|
defaultHashes := candidateHashes[len(candidateHashes)-1:]
|
||||||
|
|
||||||
|
encryptKeys := make([]Key, len(to))
|
||||||
|
for i := range to {
|
||||||
|
var ok bool
|
||||||
|
encryptKeys[i], ok = to[i].encryptionKey(config.Now())
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys")
|
||||||
|
}
|
||||||
|
|
||||||
|
sig := to[i].primaryIdentity().SelfSignature
|
||||||
|
|
||||||
|
preferredSymmetric := sig.PreferredSymmetric
|
||||||
|
if len(preferredSymmetric) == 0 {
|
||||||
|
preferredSymmetric = defaultCiphers
|
||||||
|
}
|
||||||
|
preferredHashes := sig.PreferredHash
|
||||||
|
if len(preferredHashes) == 0 {
|
||||||
|
preferredHashes = defaultHashes
|
||||||
|
}
|
||||||
|
candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric)
|
||||||
|
candidateHashes = intersectPreferences(candidateHashes, preferredHashes)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(candidateCiphers) == 0 || len(candidateHashes) == 0 {
|
||||||
|
return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms")
|
||||||
|
}
|
||||||
|
|
||||||
|
cipher := packet.CipherFunction(candidateCiphers[0])
|
||||||
|
// If the cipher specified by config is a candidate, we'll use that.
|
||||||
|
configuredCipher := config.Cipher()
|
||||||
|
for _, c := range candidateCiphers {
|
||||||
|
cipherFunc := packet.CipherFunction(c)
|
||||||
|
if cipherFunc == configuredCipher {
|
||||||
|
cipher = cipherFunc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var hash crypto.Hash
|
||||||
|
for _, hashId := range candidateHashes {
|
||||||
|
if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() {
|
||||||
|
hash = h
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the hash specified by config is a candidate, we'll use that.
|
||||||
|
if configuredHash := config.Hash(); configuredHash.Available() {
|
||||||
|
for _, hashId := range candidateHashes {
|
||||||
|
if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash {
|
||||||
|
hash = h
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if hash == 0 {
|
||||||
|
hashId := candidateHashes[0]
|
||||||
|
name, ok := s2k.HashIdToString(hashId)
|
||||||
|
if !ok {
|
||||||
|
name = "#" + strconv.Itoa(int(hashId))
|
||||||
|
}
|
||||||
|
return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
|
||||||
|
}
|
||||||
|
|
||||||
|
symKey := make([]byte, cipher.KeySize())
|
||||||
|
if _, err := io.ReadFull(config.Random(), symKey); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, key := range encryptKeys {
|
||||||
|
if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
encryptedData, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if signer != nil {
|
||||||
|
ops := &packet.OnePassSignature{
|
||||||
|
SigType: packet.SigTypeBinary,
|
||||||
|
Hash: hash,
|
||||||
|
PubKeyAlgo: signer.PubKeyAlgo,
|
||||||
|
KeyId: signer.KeyId,
|
||||||
|
IsLast: true,
|
||||||
|
}
|
||||||
|
if err := ops.Serialize(encryptedData); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if hints == nil {
|
||||||
|
hints = &FileHints{}
|
||||||
|
}
|
||||||
|
|
||||||
|
w := encryptedData
|
||||||
|
if signer != nil {
|
||||||
|
// If we need to write a signature packet after the literal
|
||||||
|
// data then we need to stop literalData from closing
|
||||||
|
// encryptedData.
|
||||||
|
w = noOpCloser{encryptedData}
|
||||||
|
|
||||||
|
}
|
||||||
|
var epochSeconds uint32
|
||||||
|
if !hints.ModTime.IsZero() {
|
||||||
|
epochSeconds = uint32(hints.ModTime.Unix())
|
||||||
|
}
|
||||||
|
literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if signer != nil {
|
||||||
|
return signatureWriter{encryptedData, literalData, hash, hash.New(), signer, config}, nil
|
||||||
|
}
|
||||||
|
return literalData, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// signatureWriter hashes the contents of a message while passing it along to
|
||||||
|
// literalData. When closed, it closes literalData, writes a signature packet
|
||||||
|
// to encryptedData and then also closes encryptedData.
|
||||||
|
type signatureWriter struct {
|
||||||
|
encryptedData io.WriteCloser
|
||||||
|
literalData io.WriteCloser
|
||||||
|
hashType crypto.Hash
|
||||||
|
h hash.Hash
|
||||||
|
signer *packet.PrivateKey
|
||||||
|
config *packet.Config
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s signatureWriter) Write(data []byte) (int, error) {
|
||||||
|
s.h.Write(data)
|
||||||
|
return s.literalData.Write(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s signatureWriter) Close() error {
|
||||||
|
sig := &packet.Signature{
|
||||||
|
SigType: packet.SigTypeBinary,
|
||||||
|
PubKeyAlgo: s.signer.PubKeyAlgo,
|
||||||
|
Hash: s.hashType,
|
||||||
|
CreationTime: s.config.Now(),
|
||||||
|
IssuerKeyId: &s.signer.KeyId,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := sig.Sign(s.h, s.signer, s.config); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := s.literalData.Close(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := sig.Serialize(s.encryptedData); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return s.encryptedData.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
|
||||||
|
// TODO: we have two of these in OpenPGP packages alone. This probably needs
|
||||||
|
// to be promoted somewhere more common.
|
||||||
|
type noOpCloser struct {
|
||||||
|
w io.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c noOpCloser) Write(data []byte) (n int, err error) {
|
||||||
|
return c.w.Write(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c noOpCloser) Close() error {
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -6,4 +6,7 @@ filebeat
|
||||||
|
|
||||||
build
|
build
|
||||||
_meta/kibana
|
_meta/kibana
|
||||||
|
_meta/module.generated
|
||||||
|
_meta/beat.yml
|
||||||
|
_meta/beat.full.yml
|
||||||
/tests/load/logs
|
/tests/load/logs
|
||||||
|
|
|
@ -1,10 +1,10 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
BEATNAME?=filebeat
|
BEAT_NAME?=filebeat
|
||||||
BEAT_DESCRIPTION?=Filebeat sends log files to Logstash or directly to Elasticsearch.
|
BEAT_DESCRIPTION?=Filebeat sends log files to Logstash or directly to Elasticsearch.
|
||||||
SYSTEM_TESTS=true
|
SYSTEM_TESTS=true
|
||||||
TEST_ENVIRONMENT?=true
|
TEST_ENVIRONMENT?=true
|
||||||
GOX_FLAGS='-arch=amd64 386 arm ppc64 ppc64le'
|
GOX_FLAGS=-arch="amd64 386 arm ppc64 ppc64le"
|
||||||
|
|
||||||
include ../libbeat/scripts/Makefile
|
include ../libbeat/scripts/Makefile
|
||||||
|
|
||||||
|
@ -27,13 +27,34 @@ fields:
|
||||||
cat ${ES_BEATS}/filebeat/_meta/fields.common.yml > _meta/fields.generated.yml
|
cat ${ES_BEATS}/filebeat/_meta/fields.common.yml > _meta/fields.generated.yml
|
||||||
. ${PYTHON_ENV}/bin/activate; python ${ES_BEATS}/metricbeat/scripts/fields_collector.py >> _meta/fields.generated.yml
|
. ${PYTHON_ENV}/bin/activate; python ${ES_BEATS}/metricbeat/scripts/fields_collector.py >> _meta/fields.generated.yml
|
||||||
|
|
||||||
|
# Collects all modules files to be packaged in a temporary folder
|
||||||
|
.PHONY: modules
|
||||||
|
modules:
|
||||||
|
mkdir -p _meta/
|
||||||
|
rm -rf _meta/module.generated
|
||||||
|
rsync -av module/ _meta/module.generated --exclude "_meta" --exclude "*/*/test"
|
||||||
|
|
||||||
|
# Collects all module configs
|
||||||
|
.PHONY: configs
|
||||||
|
configs: python-env
|
||||||
|
cat ${ES_BEATS}/filebeat/_meta/common.p1.yml > _meta/beat.yml
|
||||||
|
# disabled wile filebeat modules are Beta
|
||||||
|
#. ${PYTHON_ENV}/bin/activate; python ${ES_BEATS}/metricbeat/scripts/config_collector.py --beat ${BEAT_NAME} $(PWD) >> _meta/beat.yml
|
||||||
|
cat ${ES_BEATS}/filebeat/_meta/common.p2.yml >> _meta/beat.yml
|
||||||
|
cat ${ES_BEATS}/filebeat/_meta/common.full.p1.yml > _meta/beat.full.yml
|
||||||
|
. ${PYTHON_ENV}/bin/activate; python ${ES_BEATS}/metricbeat/scripts/config_collector.py --beat ${BEAT_NAME} --full $(PWD) >> _meta/beat.full.yml
|
||||||
|
cat ${ES_BEATS}/filebeat/_meta/common.full.p2.yml >> _meta/beat.full.yml
|
||||||
|
|
||||||
|
# Collects all module docs
|
||||||
|
.PHONY: collect-docs
|
||||||
|
collect-docs: python-env
|
||||||
|
-rm -rf docs/modules
|
||||||
|
mkdir -p docs/modules
|
||||||
|
. ${PYTHON_ENV}/bin/activate; python ${ES_BEATS}/filebeat/scripts/docs_collector.py --beat ${BEAT_NAME}
|
||||||
|
|
||||||
# Runs all collection steps and updates afterwards
|
# Runs all collection steps and updates afterwards
|
||||||
.PHONY: collect
|
.PHONY: collect
|
||||||
collect:
|
collect: fields kibana modules configs collect-docs
|
||||||
# dummy implementation to avoid adding the modules until released
|
|
||||||
mkdir -p _meta/
|
|
||||||
cat ${ES_BEATS}/filebeat/_meta/fields.common.yml > _meta/fields.generated.yml
|
|
||||||
|
|
||||||
|
|
||||||
# Creates a new fileset. Requires the params MODULE and FILESET
|
# Creates a new fileset. Requires the params MODULE and FILESET
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
##################$$$###### Filebeat Configuration ############################
|
######################## Filebeat Configuration ############################
|
||||||
|
|
||||||
# This file is a full configuration example documenting all non-deprecated
|
# This file is a full configuration example documenting all non-deprecated
|
||||||
# options in comments. For a shorter configuration example, that contains only
|
# options in comments. For a shorter configuration example, that contains only
|
||||||
|
@ -7,6 +7,164 @@
|
||||||
# You can find the full configuration reference here:
|
# You can find the full configuration reference here:
|
||||||
# https://www.elastic.co/guide/en/beats/filebeat/index.html
|
# https://www.elastic.co/guide/en/beats/filebeat/index.html
|
||||||
|
|
||||||
|
|
||||||
|
#========================== Modules configuration ============================
|
||||||
|
filebeat.modules:
|
||||||
|
|
||||||
|
#------------------------------- System Module -------------------------------
|
||||||
|
#- module: system
|
||||||
|
# Syslog
|
||||||
|
#syslog:
|
||||||
|
#enabled: true
|
||||||
|
|
||||||
|
# Set custom paths for the log files. If left empty,
|
||||||
|
# Filebeat will choose the paths depending on your OS.
|
||||||
|
#var.paths:
|
||||||
|
|
||||||
|
# Prospector configuration (advanced). Any prospector configuration option
|
||||||
|
# can be added under this section.
|
||||||
|
#prospector:
|
||||||
|
|
||||||
|
#------------------------------- Apache2 Module ------------------------------
|
||||||
|
#- module: apache2
|
||||||
|
# Access logs
|
||||||
|
#access:
|
||||||
|
#enabled: true
|
||||||
|
|
||||||
|
# Ingest Node pipeline to use. Options are `with_plugins` (default)
|
||||||
|
# and `no_plugins`. Use `no_plugins` if you don't have the geoip or
|
||||||
|
# the user agent Node ingest plugins installed.
|
||||||
|
#var.pipeline: with_plugins
|
||||||
|
|
||||||
|
# Set custom paths for the log files. If left empty,
|
||||||
|
# Filebeat will choose the paths depending on your OS.
|
||||||
|
#var.paths:
|
||||||
|
|
||||||
|
# Prospector configuration (advanced). Any prospector configuration option
|
||||||
|
# can be added under this section.
|
||||||
|
#prospector:
|
||||||
|
|
||||||
|
# Error logs
|
||||||
|
#error:
|
||||||
|
#enabled: true
|
||||||
|
|
||||||
|
# Set custom paths for the log files. If left empty,
|
||||||
|
# Filebeat will choose the paths depending on your OS.
|
||||||
|
#var.paths:
|
||||||
|
|
||||||
|
# Prospector configuration (advanced). Any prospector configuration option
|
||||||
|
# can be added under this section.
|
||||||
|
#prospector:
|
||||||
|
|
||||||
|
#------------------------------- Auditd Module -------------------------------
|
||||||
|
#- module: auditd
|
||||||
|
#log:
|
||||||
|
#enabled: true
|
||||||
|
|
||||||
|
# Set custom paths for the log files. If left empty,
|
||||||
|
# Filebeat will choose the paths depending on your OS.
|
||||||
|
#var.paths:
|
||||||
|
|
||||||
|
# Prospector configuration (advanced). Any prospector configuration option
|
||||||
|
# can be added under this section.
|
||||||
|
#prospector:
|
||||||
|
|
||||||
|
#------------------------------- Icinga Module -------------------------------
|
||||||
|
#- module: icinga
|
||||||
|
# Main logs
|
||||||
|
#main:
|
||||||
|
#enabled: true
|
||||||
|
|
||||||
|
# Set custom paths for the log files. If left empty,
|
||||||
|
# Filebeat will choose the paths depending on your OS.
|
||||||
|
#var.paths:
|
||||||
|
|
||||||
|
# Prospector configuration (advanced). Any prospector configuration option
|
||||||
|
# can be added under this section.
|
||||||
|
#prospector:
|
||||||
|
|
||||||
|
# Debug logs
|
||||||
|
#debug:
|
||||||
|
#enabled: true
|
||||||
|
|
||||||
|
# Set custom paths for the log files. If left empty,
|
||||||
|
# Filebeat will choose the paths depending on your OS.
|
||||||
|
#var.paths:
|
||||||
|
|
||||||
|
# Prospector configuration (advanced). Any prospector configuration option
|
||||||
|
# can be added under this section.
|
||||||
|
#prospector:
|
||||||
|
|
||||||
|
# Startup logs
|
||||||
|
#startup:
|
||||||
|
#enabled: true
|
||||||
|
|
||||||
|
# Set custom paths for the log files. If left empty,
|
||||||
|
# Filebeat will choose the paths depending on your OS.
|
||||||
|
#var.paths:
|
||||||
|
|
||||||
|
# Prospector configuration (advanced). Any prospector configuration option
|
||||||
|
# can be added under this section.
|
||||||
|
#prospector:
|
||||||
|
|
||||||
|
#-------------------------------- MySQL Module -------------------------------
|
||||||
|
#- module: mysql
|
||||||
|
# Error logs
|
||||||
|
#error:
|
||||||
|
#enabled: true
|
||||||
|
|
||||||
|
# Set custom paths for the log files. If left empty,
|
||||||
|
# Filebeat will choose the paths depending on your OS.
|
||||||
|
#var.paths:
|
||||||
|
|
||||||
|
# Prospector configuration (advanced). Any prospector configuration option
|
||||||
|
# can be added under this section.
|
||||||
|
#prospector:
|
||||||
|
|
||||||
|
# Slow logs
|
||||||
|
#slowlog:
|
||||||
|
#enabled: true
|
||||||
|
|
||||||
|
# Set custom paths for the log files. If left empty,
|
||||||
|
# Filebeat will choose the paths depending on your OS.
|
||||||
|
#var.paths:
|
||||||
|
|
||||||
|
# Prospector configuration (advanced). Any prospector configuration option
|
||||||
|
# can be added under this section.
|
||||||
|
#prospector:
|
||||||
|
|
||||||
|
#-------------------------------- Nginx Module -------------------------------
|
||||||
|
#- module: nginx
|
||||||
|
# Access logs
|
||||||
|
#access:
|
||||||
|
#enabled: true
|
||||||
|
|
||||||
|
# Ingest Node pipeline to use. Options are `with_plugins` (default)
|
||||||
|
# and `no_plugins`. Use `no_plugins` if you don't have the geoip or
|
||||||
|
# the user agent Node ingest plugins installed.
|
||||||
|
#var.pipeline: with_plugins
|
||||||
|
|
||||||
|
# Set custom paths for the log files. If left empty,
|
||||||
|
# Filebeat will choose the paths depending on your OS.
|
||||||
|
#var.paths:
|
||||||
|
|
||||||
|
# Prospector configuration (advanced). Any prospector configuration option
|
||||||
|
# can be added under this section.
|
||||||
|
#prospector:
|
||||||
|
|
||||||
|
# Error logs
|
||||||
|
#error:
|
||||||
|
#enabled: true
|
||||||
|
|
||||||
|
# Set custom paths for the log files. If left empty,
|
||||||
|
# Filebeat will choose the paths depending on your OS.
|
||||||
|
#var.paths:
|
||||||
|
|
||||||
|
# Prospector configuration (advanced). Any prospector configuration option
|
||||||
|
# can be added under this section.
|
||||||
|
#prospector:
|
||||||
|
|
||||||
|
|
||||||
#=========================== Filebeat prospectors =============================
|
#=========================== Filebeat prospectors =============================
|
||||||
|
|
||||||
# List of prospectors to fetch data.
|
# List of prospectors to fetch data.
|
||||||
|
@ -45,16 +203,16 @@ filebeat.prospectors:
|
||||||
# Exclude lines. A list of regular expressions to match. It drops the lines that are
|
# Exclude lines. A list of regular expressions to match. It drops the lines that are
|
||||||
# matching any regular expression from the list. The include_lines is called before
|
# matching any regular expression from the list. The include_lines is called before
|
||||||
# exclude_lines. By default, no lines are dropped.
|
# exclude_lines. By default, no lines are dropped.
|
||||||
#exclude_lines: ["^DBG"]
|
#exclude_lines: ['^DBG']
|
||||||
|
|
||||||
# Include lines. A list of regular expressions to match. It exports the lines that are
|
# Include lines. A list of regular expressions to match. It exports the lines that are
|
||||||
# matching any regular expression from the list. The include_lines is called before
|
# matching any regular expression from the list. The include_lines is called before
|
||||||
# exclude_lines. By default, all the lines are exported.
|
# exclude_lines. By default, all the lines are exported.
|
||||||
#include_lines: ["^ERR", "^WARN"]
|
#include_lines: ['^ERR', '^WARN']
|
||||||
|
|
||||||
# Exclude files. A list of regular expressions to match. Filebeat drops the files that
|
# Exclude files. A list of regular expressions to match. Filebeat drops the files that
|
||||||
# are matching any regular expression from the list. By default, no files are dropped.
|
# are matching any regular expression from the list. By default, no files are dropped.
|
||||||
#exclude_files: [".gz$"]
|
#exclude_files: ['.gz$']
|
||||||
|
|
||||||
# Optional additional fields. These field can be freely picked
|
# Optional additional fields. These field can be freely picked
|
||||||
# to add additional information to the crawled log files for filtering
|
# to add additional information to the crawled log files for filtering
|
||||||
|
@ -143,7 +301,11 @@ filebeat.prospectors:
|
||||||
# this can mean that the first entries of a new file are skipped.
|
# this can mean that the first entries of a new file are skipped.
|
||||||
#tail_files: false
|
#tail_files: false
|
||||||
|
|
||||||
# Experimental: If symlinks is enabled, symlinks are opened and harvested. The harvester is openening the
|
# The Ingest Node pipeline ID associated with this prospector. If this is set, it
|
||||||
|
# overwrites the pipeline option from the Elasticsearch output.
|
||||||
|
#pipeline:
|
||||||
|
|
||||||
|
# If symlinks is enabled, symlinks are opened and harvested. The harvester is openening the
|
||||||
# original for harvesting but will report the symlink name as source.
|
# original for harvesting but will report the symlink name as source.
|
||||||
#symlinks: false
|
#symlinks: false
|
||||||
|
|
||||||
|
@ -165,7 +327,7 @@ filebeat.prospectors:
|
||||||
# The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached
|
# The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached
|
||||||
#backoff_factor: 2
|
#backoff_factor: 2
|
||||||
|
|
||||||
# Experimental: Max number of harvesters that are started in parallel.
|
# Max number of harvesters that are started in parallel.
|
||||||
# Default is 0 which means unlimited
|
# Default is 0 which means unlimited
|
||||||
#harvester_limit: 0
|
#harvester_limit: 0
|
||||||
|
|
||||||
|
|
|
@ -7,6 +7,33 @@
|
||||||
# You can find the full configuration reference here:
|
# You can find the full configuration reference here:
|
||||||
# https://www.elastic.co/guide/en/beats/filebeat/index.html
|
# https://www.elastic.co/guide/en/beats/filebeat/index.html
|
||||||
|
|
||||||
|
|
||||||
|
#========================== Modules configuration ============================
|
||||||
|
filebeat.modules:
|
||||||
|
|
||||||
|
#------------------------------- System Module -------------------------------
|
||||||
|
#- module: system
|
||||||
|
|
||||||
|
#------------------------------- Auditd Module -------------------------------
|
||||||
|
#- module: auditd
|
||||||
|
|
||||||
|
#------------------------------- Icinga Module -------------------------------
|
||||||
|
#- module: icinga
|
||||||
|
|
||||||
|
#-------------------------------- MySQL Module -------------------------------
|
||||||
|
#- module: mysql
|
||||||
|
|
||||||
|
#-------------------------------- Nginx Module -------------------------------
|
||||||
|
#- module: nginx
|
||||||
|
# Ingest Node pipeline to use. Options are `with_plugins` (default)
|
||||||
|
# and `no_plugins`. Use `no_plugins` if you don't have the geoip or
|
||||||
|
# the user agent Node ingest plugins installed.
|
||||||
|
#access.var.pipeline: with_plugins
|
||||||
|
|
||||||
|
|
||||||
|
# For more available modules and options, please see the filebeat.full.yml sample
|
||||||
|
# configuration file.
|
||||||
|
|
||||||
#=========================== Filebeat prospectors =============================
|
#=========================== Filebeat prospectors =============================
|
||||||
|
|
||||||
filebeat.prospectors:
|
filebeat.prospectors:
|
||||||
|
@ -24,15 +51,15 @@ filebeat.prospectors:
|
||||||
|
|
||||||
# Exclude lines. A list of regular expressions to match. It drops the lines that are
|
# Exclude lines. A list of regular expressions to match. It drops the lines that are
|
||||||
# matching any regular expression from the list.
|
# matching any regular expression from the list.
|
||||||
#exclude_lines: ["^DBG"]
|
#exclude_lines: ['^DBG']
|
||||||
|
|
||||||
# Include lines. A list of regular expressions to match. It exports the lines that are
|
# Include lines. A list of regular expressions to match. It exports the lines that are
|
||||||
# matching any regular expression from the list.
|
# matching any regular expression from the list.
|
||||||
#include_lines: ["^ERR", "^WARN"]
|
#include_lines: ['^ERR', '^WARN']
|
||||||
|
|
||||||
# Exclude files. A list of regular expressions to match. Filebeat drops the files that
|
# Exclude files. A list of regular expressions to match. Filebeat drops the files that
|
||||||
# are matching any regular expression from the list. By default, no files are dropped.
|
# are matching any regular expression from the list. By default, no files are dropped.
|
||||||
#exclude_files: [".gz$"]
|
#exclude_files: ['.gz$']
|
||||||
|
|
||||||
# Optional additional fields. These field can be freely picked
|
# Optional additional fields. These field can be freely picked
|
||||||
# to add additional information to the crawled log files for filtering
|
# to add additional information to the crawled log files for filtering
|
||||||
|
|
|
@ -0,0 +1,9 @@
|
||||||
|
######################## Filebeat Configuration ############################
|
||||||
|
|
||||||
|
# This file is a full configuration example documenting all non-deprecated
|
||||||
|
# options in comments. For a shorter configuration example, that contains only
|
||||||
|
# the most common options, please see filebeat.yml in the same directory.
|
||||||
|
#
|
||||||
|
# You can find the full configuration reference here:
|
||||||
|
# https://www.elastic.co/guide/en/beats/filebeat/index.html
|
||||||
|
|
234
vendor/github.com/elastic/beats/filebeat/_meta/common.full.p2.yml
generated
vendored
Normal file
234
vendor/github.com/elastic/beats/filebeat/_meta/common.full.p2.yml
generated
vendored
Normal file
|
@ -0,0 +1,234 @@
|
||||||
|
#=========================== Filebeat prospectors =============================
|
||||||
|
|
||||||
|
# List of prospectors to fetch data.
|
||||||
|
filebeat.prospectors:
|
||||||
|
# Each - is a prospector. Most options can be set at the prospector level, so
|
||||||
|
# you can use different prospectors for various configurations.
|
||||||
|
# Below are the prospector specific configurations.
|
||||||
|
|
||||||
|
# Type of the files. Based on this the way the file is read is decided.
|
||||||
|
# The different types cannot be mixed in one prospector
|
||||||
|
#
|
||||||
|
# Possible options are:
|
||||||
|
# * log: Reads every line of the log file (default)
|
||||||
|
# * stdin: Reads the standard in
|
||||||
|
|
||||||
|
#------------------------------ Log prospector --------------------------------
|
||||||
|
- input_type: log
|
||||||
|
|
||||||
|
# Paths that should be crawled and fetched. Glob based paths.
|
||||||
|
# To fetch all ".log" files from a specific level of subdirectories
|
||||||
|
# /var/log/*/*.log can be used.
|
||||||
|
# For each file found under this path, a harvester is started.
|
||||||
|
# Make sure not file is defined twice as this can lead to unexpected behaviour.
|
||||||
|
paths:
|
||||||
|
- /var/log/*.log
|
||||||
|
#- c:\programdata\elasticsearch\logs\*
|
||||||
|
|
||||||
|
# Configure the file encoding for reading files with international characters
|
||||||
|
# following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
|
||||||
|
# Some sample encodings:
|
||||||
|
# plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
|
||||||
|
# hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
|
||||||
|
#encoding: plain
|
||||||
|
|
||||||
|
|
||||||
|
# Exclude lines. A list of regular expressions to match. It drops the lines that are
|
||||||
|
# matching any regular expression from the list. The include_lines is called before
|
||||||
|
# exclude_lines. By default, no lines are dropped.
|
||||||
|
#exclude_lines: ["^DBG"]
|
||||||
|
|
||||||
|
# Include lines. A list of regular expressions to match. It exports the lines that are
|
||||||
|
# matching any regular expression from the list. The include_lines is called before
|
||||||
|
# exclude_lines. By default, all the lines are exported.
|
||||||
|
#include_lines: ["^ERR", "^WARN"]
|
||||||
|
|
||||||
|
# Exclude files. A list of regular expressions to match. Filebeat drops the files that
|
||||||
|
# are matching any regular expression from the list. By default, no files are dropped.
|
||||||
|
#exclude_files: [".gz$"]
|
||||||
|
|
||||||
|
# Optional additional fields. These field can be freely picked
|
||||||
|
# to add additional information to the crawled log files for filtering
|
||||||
|
#fields:
|
||||||
|
# level: debug
|
||||||
|
# review: 1
|
||||||
|
|
||||||
|
# Set to true to store the additional fields as top level fields instead
|
||||||
|
# of under the "fields" sub-dictionary. In case of name conflicts with the
|
||||||
|
# fields added by Filebeat itself, the custom fields overwrite the default
|
||||||
|
# fields.
|
||||||
|
#fields_under_root: false
|
||||||
|
|
||||||
|
# Ignore files which were modified more then the defined timespan in the past.
|
||||||
|
# ignore_older is disabled by default, so no files are ignored by setting it to 0.
|
||||||
|
# Time strings like 2h (2 hours), 5m (5 minutes) can be used.
|
||||||
|
#ignore_older: 0
|
||||||
|
|
||||||
|
# Type to be published in the 'type' field. For Elasticsearch output,
|
||||||
|
# the type defines the document type these entries should be stored
|
||||||
|
# in. Default: log
|
||||||
|
#document_type: log
|
||||||
|
|
||||||
|
# How often the prospector checks for new files in the paths that are specified
|
||||||
|
# for harvesting. Specify 1s to scan the directory as frequently as possible
|
||||||
|
# without causing Filebeat to scan too frequently. Default: 10s.
|
||||||
|
#scan_frequency: 10s
|
||||||
|
|
||||||
|
# Defines the buffer size every harvester uses when fetching the file
|
||||||
|
#harvester_buffer_size: 16384
|
||||||
|
|
||||||
|
# Maximum number of bytes a single log event can have
|
||||||
|
# All bytes after max_bytes are discarded and not sent. The default is 10MB.
|
||||||
|
# This is especially useful for multiline log messages which can get large.
|
||||||
|
#max_bytes: 10485760
|
||||||
|
|
||||||
|
### JSON configuration
|
||||||
|
|
||||||
|
# Decode JSON options. Enable this if your logs are structured in JSON.
|
||||||
|
# JSON key on which to apply the line filtering and multiline settings. This key
|
||||||
|
# must be top level and its value must be string, otherwise it is ignored. If
|
||||||
|
# no text key is defined, the line filtering and multiline features cannot be used.
|
||||||
|
#json.message_key:
|
||||||
|
|
||||||
|
# By default, the decoded JSON is placed under a "json" key in the output document.
|
||||||
|
# If you enable this setting, the keys are copied top level in the output document.
|
||||||
|
#json.keys_under_root: false
|
||||||
|
|
||||||
|
# If keys_under_root and this setting are enabled, then the values from the decoded
|
||||||
|
# JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.)
|
||||||
|
# in case of conflicts.
|
||||||
|
#json.overwrite_keys: false
|
||||||
|
|
||||||
|
# If this setting is enabled, Filebeat adds a "json_error" key in case of JSON
|
||||||
|
# unmarshaling errors or when a text key is defined in the configuration but cannot
|
||||||
|
# be used.
|
||||||
|
#json.add_error_key: false
|
||||||
|
|
||||||
|
### Multiline options
|
||||||
|
|
||||||
|
# Mutiline can be used for log messages spanning multiple lines. This is common
|
||||||
|
# for Java Stack Traces or C-Line Continuation
|
||||||
|
|
||||||
|
# The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
|
||||||
|
#multiline.pattern: ^\[
|
||||||
|
|
||||||
|
# Defines if the pattern set under pattern should be negated or not. Default is false.
|
||||||
|
#multiline.negate: false
|
||||||
|
|
||||||
|
# Match can be set to "after" or "before". It is used to define if lines should be append to a pattern
|
||||||
|
# that was (not) matched before or after or as long as a pattern is not matched based on negate.
|
||||||
|
# Note: After is the equivalent to previous and before is the equivalent to to next in Logstash
|
||||||
|
#multiline.match: after
|
||||||
|
|
||||||
|
# The maximum number of lines that are combined to one event.
|
||||||
|
# In case there are more the max_lines the additional lines are discarded.
|
||||||
|
# Default is 500
|
||||||
|
#multiline.max_lines: 500
|
||||||
|
|
||||||
|
# After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event
|
||||||
|
# Default is 5s.
|
||||||
|
#multiline.timeout: 5s
|
||||||
|
|
||||||
|
# Setting tail_files to true means filebeat starts reading new files at the end
|
||||||
|
# instead of the beginning. If this is used in combination with log rotation
|
||||||
|
# this can mean that the first entries of a new file are skipped.
|
||||||
|
#tail_files: false
|
||||||
|
|
||||||
|
# The Ingest Node pipeline ID associated with this prospector. If this is set, it
|
||||||
|
# overwrites the pipeline option from the Elasticsearch output.
|
||||||
|
#pipeline:
|
||||||
|
|
||||||
|
# If symlinks is enabled, symlinks are opened and harvested. The harvester is openening the
|
||||||
|
# original for harvesting but will report the symlink name as source.
|
||||||
|
#symlinks: false
|
||||||
|
|
||||||
|
# Backoff values define how aggressively filebeat crawls new files for updates
|
||||||
|
# The default values can be used in most cases. Backoff defines how long it is waited
|
||||||
|
# to check a file again after EOF is reached. Default is 1s which means the file
|
||||||
|
# is checked every second if new lines were added. This leads to a near real time crawling.
|
||||||
|
# Every time a new line appears, backoff is reset to the initial value.
|
||||||
|
#backoff: 1s
|
||||||
|
|
||||||
|
# Max backoff defines what the maximum backoff time is. After having backed off multiple times
|
||||||
|
# from checking the files, the waiting time will never exceed max_backoff independent of the
|
||||||
|
# backoff factor. Having it set to 10s means in the worst case a new line can be added to a log
|
||||||
|
# file after having backed off multiple times, it takes a maximum of 10s to read the new line
|
||||||
|
#max_backoff: 10s
|
||||||
|
|
||||||
|
# The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor,
|
||||||
|
# the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen.
|
||||||
|
# The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached
|
||||||
|
#backoff_factor: 2
|
||||||
|
|
||||||
|
# Max number of harvesters that are started in parallel.
|
||||||
|
# Default is 0 which means unlimited
|
||||||
|
#harvester_limit: 0
|
||||||
|
|
||||||
|
### Harvester closing options
|
||||||
|
|
||||||
|
# Close inactive closes the file handler after the predefined period.
|
||||||
|
# The period starts when the last line of the file was, not the file ModTime.
|
||||||
|
# Time strings like 2h (2 hours), 5m (5 minutes) can be used.
|
||||||
|
#close_inactive: 5m
|
||||||
|
|
||||||
|
# Close renamed closes a file handler when the file is renamed or rotated.
|
||||||
|
# Note: Potential data loss. Make sure to read and understand the docs for this option.
|
||||||
|
#close_renamed: false
|
||||||
|
|
||||||
|
# When enabling this option, a file handler is closed immediately in case a file can't be found
|
||||||
|
# any more. In case the file shows up again later, harvesting will continue at the last known position
|
||||||
|
# after scan_frequency.
|
||||||
|
#close_removed: true
|
||||||
|
|
||||||
|
# Closes the file handler as soon as the harvesters reaches the end of the file.
|
||||||
|
# By default this option is disabled.
|
||||||
|
# Note: Potential data loss. Make sure to read and understand the docs for this option.
|
||||||
|
#close_eof: false
|
||||||
|
|
||||||
|
### State options
|
||||||
|
|
||||||
|
# Files for the modification data is older then clean_inactive the state from the registry is removed
|
||||||
|
# By default this is disabled.
|
||||||
|
#clean_inactive: 0
|
||||||
|
|
||||||
|
# Removes the state for file which cannot be found on disk anymore immediately
|
||||||
|
#clean_removed: true
|
||||||
|
|
||||||
|
# Close timeout closes the harvester after the predefined time.
|
||||||
|
# This is independent if the harvester did finish reading the file or not.
|
||||||
|
# By default this option is disabled.
|
||||||
|
# Note: Potential data loss. Make sure to read and understand the docs for this option.
|
||||||
|
#close_timeout: 0
|
||||||
|
|
||||||
|
# Defines if prospectors is enabled
|
||||||
|
#enabled: true
|
||||||
|
|
||||||
|
#----------------------------- Stdin prospector -------------------------------
|
||||||
|
# Configuration to use stdin input
|
||||||
|
#- input_type: stdin
|
||||||
|
|
||||||
|
#========================= Filebeat global options ============================
|
||||||
|
|
||||||
|
# Event count spool threshold - forces network flush if exceeded
|
||||||
|
#filebeat.spool_size: 2048
|
||||||
|
|
||||||
|
# Enable async publisher pipeline in filebeat (Experimental!)
|
||||||
|
#filebeat.publish_async: false
|
||||||
|
|
||||||
|
# Defines how often the spooler is flushed. After idle_timeout the spooler is
|
||||||
|
# Flush even though spool_size is not reached.
|
||||||
|
#filebeat.idle_timeout: 5s
|
||||||
|
|
||||||
|
# Name of the registry file. If a relative path is used, it is considered relative to the
|
||||||
|
# data path.
|
||||||
|
#filebeat.registry_file: ${path.data}/registry
|
||||||
|
|
||||||
|
#
|
||||||
|
# These config files must have the full filebeat config part inside, but only
|
||||||
|
# the prospector part is processed. All global options like spool_size are ignored.
|
||||||
|
# The config_dir MUST point to a different directory then where the main filebeat config file is in.
|
||||||
|
#filebeat.config_dir:
|
||||||
|
|
||||||
|
# How long filebeat waits on shutdown for the publisher to finish.
|
||||||
|
# Default is 0, not waiting.
|
||||||
|
#filebeat.shutdown_timeout: 0
|
|
@ -0,0 +1,9 @@
|
||||||
|
###################### Filebeat Configuration Example #########################
|
||||||
|
|
||||||
|
# This file is an example configuration file highlighting only the most common
|
||||||
|
# options. The filebeat.full.yml file from the same directory contains all the
|
||||||
|
# supported options with more comments. You can use it as a reference.
|
||||||
|
#
|
||||||
|
# You can find the full configuration reference here:
|
||||||
|
# https://www.elastic.co/guide/en/beats/filebeat/index.html
|
||||||
|
|
|
@ -0,0 +1,49 @@
|
||||||
|
#=========================== Filebeat prospectors =============================
|
||||||
|
|
||||||
|
filebeat.prospectors:
|
||||||
|
|
||||||
|
# Each - is a prospector. Most options can be set at the prospector level, so
|
||||||
|
# you can use different prospectors for various configurations.
|
||||||
|
# Below are the prospector specific configurations.
|
||||||
|
|
||||||
|
- input_type: log
|
||||||
|
|
||||||
|
# Paths that should be crawled and fetched. Glob based paths.
|
||||||
|
paths:
|
||||||
|
- /var/log/*.log
|
||||||
|
#- c:\programdata\elasticsearch\logs\*
|
||||||
|
|
||||||
|
# Exclude lines. A list of regular expressions to match. It drops the lines that are
|
||||||
|
# matching any regular expression from the list.
|
||||||
|
#exclude_lines: ["^DBG"]
|
||||||
|
|
||||||
|
# Include lines. A list of regular expressions to match. It exports the lines that are
|
||||||
|
# matching any regular expression from the list.
|
||||||
|
#include_lines: ["^ERR", "^WARN"]
|
||||||
|
|
||||||
|
# Exclude files. A list of regular expressions to match. Filebeat drops the files that
|
||||||
|
# are matching any regular expression from the list. By default, no files are dropped.
|
||||||
|
#exclude_files: [".gz$"]
|
||||||
|
|
||||||
|
# Optional additional fields. These field can be freely picked
|
||||||
|
# to add additional information to the crawled log files for filtering
|
||||||
|
#fields:
|
||||||
|
# level: debug
|
||||||
|
# review: 1
|
||||||
|
|
||||||
|
### Multiline options
|
||||||
|
|
||||||
|
# Mutiline can be used for log messages spanning multiple lines. This is common
|
||||||
|
# for Java Stack Traces or C-Line Continuation
|
||||||
|
|
||||||
|
# The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
|
||||||
|
#multiline.pattern: ^\[
|
||||||
|
|
||||||
|
# Defines if the pattern set under pattern should be negated or not. Default is false.
|
||||||
|
#multiline.negate: false
|
||||||
|
|
||||||
|
# Match can be set to "after" or "before". It is used to define if lines should be append to a pattern
|
||||||
|
# that was (not) matched before or after or as long as a pattern is not matched based on negate.
|
||||||
|
# Note: After is the equivalent to previous and before is the equivalent to to next in Logstash
|
||||||
|
#multiline.match: after
|
||||||
|
|
|
@ -7,7 +7,7 @@
|
||||||
type: keyword
|
type: keyword
|
||||||
required: true
|
required: true
|
||||||
description: >
|
description: >
|
||||||
The file from which the line was read. This field contains the full path to the file.
|
The file from which the line was read. This field contains the absolute path to the file.
|
||||||
For example: `/var/log/system.log`.
|
For example: `/var/log/system.log`.
|
||||||
|
|
||||||
- name: offset
|
- name: offset
|
||||||
|
@ -32,3 +32,22 @@
|
||||||
required: true
|
required: true
|
||||||
description: >
|
description: >
|
||||||
The input type from which the event was generated. This field is set to the value specified for the `input_type` option in the prospector section of the Filebeat config file.
|
The input type from which the event was generated. This field is set to the value specified for the `input_type` option in the prospector section of the Filebeat config file.
|
||||||
|
|
||||||
|
- name: error
|
||||||
|
description: >
|
||||||
|
Ingestion pipeline error message, added in case there are errors reported by
|
||||||
|
the Ingest Node in Elasticsearch.
|
||||||
|
|
||||||
|
- name: read_timestamp
|
||||||
|
description: >
|
||||||
|
In case the ingest pipeline parses the timestamp from the log contents, it stores
|
||||||
|
the original `@timestamp` (representing the time when the log line was read) in this
|
||||||
|
field.
|
||||||
|
|
||||||
|
- name: fileset.module
|
||||||
|
description: >
|
||||||
|
The Filebeat module that generated this event.
|
||||||
|
|
||||||
|
- name: fileset.name
|
||||||
|
description: >
|
||||||
|
The Filebeat fileset that generated this event.
|
||||||
|
|
|
@ -1,35 +0,0 @@
|
||||||
- key: log
|
|
||||||
title: Log File Content
|
|
||||||
description: >
|
|
||||||
Contains log file lines.
|
|
||||||
fields:
|
|
||||||
- name: source
|
|
||||||
type: keyword
|
|
||||||
required: true
|
|
||||||
description: >
|
|
||||||
The file from which the line was read. This field contains the full path to the file.
|
|
||||||
For example: `/var/log/system.log`.
|
|
||||||
|
|
||||||
- name: offset
|
|
||||||
type: long
|
|
||||||
required: false
|
|
||||||
description: >
|
|
||||||
The file offset the reported line starts at.
|
|
||||||
|
|
||||||
- name: message
|
|
||||||
type: text
|
|
||||||
ignore_above: 0
|
|
||||||
required: true
|
|
||||||
description: >
|
|
||||||
The content of the line read from the log file.
|
|
||||||
|
|
||||||
- name: type
|
|
||||||
required: true
|
|
||||||
description: >
|
|
||||||
The name of the log event. This field is set to the value specified for the `document_type` option in the prospector section of the Filebeat config file.
|
|
||||||
|
|
||||||
- name: input_type
|
|
||||||
required: true
|
|
||||||
description: >
|
|
||||||
The input type from which the event was generated. This field is set to the value specified for the `input_type` option in the prospector section of the Filebeat config file.
|
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
package beater
|
package beater
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
|
@ -8,20 +9,25 @@ import (
|
||||||
"github.com/elastic/beats/libbeat/beat"
|
"github.com/elastic/beats/libbeat/beat"
|
||||||
"github.com/elastic/beats/libbeat/common"
|
"github.com/elastic/beats/libbeat/common"
|
||||||
"github.com/elastic/beats/libbeat/logp"
|
"github.com/elastic/beats/libbeat/logp"
|
||||||
|
"github.com/elastic/beats/libbeat/outputs/elasticsearch"
|
||||||
|
|
||||||
cfg "github.com/elastic/beats/filebeat/config"
|
cfg "github.com/elastic/beats/filebeat/config"
|
||||||
"github.com/elastic/beats/filebeat/crawler"
|
"github.com/elastic/beats/filebeat/crawler"
|
||||||
|
"github.com/elastic/beats/filebeat/fileset"
|
||||||
"github.com/elastic/beats/filebeat/publisher"
|
"github.com/elastic/beats/filebeat/publisher"
|
||||||
"github.com/elastic/beats/filebeat/registrar"
|
"github.com/elastic/beats/filebeat/registrar"
|
||||||
"github.com/elastic/beats/filebeat/spooler"
|
"github.com/elastic/beats/filebeat/spooler"
|
||||||
)
|
)
|
||||||
|
|
||||||
var once = flag.Bool("once", false, "Run filebeat only once until all harvesters reach EOF")
|
var (
|
||||||
|
once = flag.Bool("once", false, "Run filebeat only once until all harvesters reach EOF")
|
||||||
|
)
|
||||||
|
|
||||||
// Filebeat is a beater object. Contains all objects needed to run the beat
|
// Filebeat is a beater object. Contains all objects needed to run the beat
|
||||||
type Filebeat struct {
|
type Filebeat struct {
|
||||||
config *cfg.Config
|
config *cfg.Config
|
||||||
done chan struct{}
|
moduleRegistry *fileset.ModuleRegistry
|
||||||
|
done chan struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new Filebeat pointer instance.
|
// New creates a new Filebeat pointer instance.
|
||||||
|
@ -30,22 +36,73 @@ func New(b *beat.Beat, rawConfig *common.Config) (beat.Beater, error) {
|
||||||
if err := rawConfig.Unpack(&config); err != nil {
|
if err := rawConfig.Unpack(&config); err != nil {
|
||||||
return nil, fmt.Errorf("Error reading config file: %v", err)
|
return nil, fmt.Errorf("Error reading config file: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
moduleRegistry, err := fileset.NewModuleRegistry(config.Modules, b.Version)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
moduleProspectors, err := moduleRegistry.GetProspectorConfigs()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
if err := config.FetchConfigs(); err != nil {
|
if err := config.FetchConfigs(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Add prospectors created by the modules
|
||||||
|
config.Prospectors = append(config.Prospectors, moduleProspectors...)
|
||||||
|
|
||||||
|
if !config.ProspectorReload.Enabled() && len(config.Prospectors) == 0 {
|
||||||
|
return nil, errors.New("No prospectors defined. What files do you want me to watch?")
|
||||||
|
}
|
||||||
|
|
||||||
|
if *once && config.ProspectorReload.Enabled() {
|
||||||
|
return nil, errors.New("prospector reloading and -once cannot be used together.")
|
||||||
|
}
|
||||||
|
|
||||||
fb := &Filebeat{
|
fb := &Filebeat{
|
||||||
done: make(chan struct{}),
|
done: make(chan struct{}),
|
||||||
config: &config,
|
config: &config,
|
||||||
|
moduleRegistry: moduleRegistry,
|
||||||
}
|
}
|
||||||
return fb, nil
|
return fb, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// modulesSetup is called when modules are configured to do the initial
|
||||||
|
// setup.
|
||||||
|
func (fb *Filebeat) modulesSetup(b *beat.Beat) error {
|
||||||
|
esConfig := b.Config.Output["elasticsearch"]
|
||||||
|
if esConfig == nil || !esConfig.Enabled() {
|
||||||
|
return fmt.Errorf("Filebeat modules configured but the Elasticsearch output is not configured/enabled")
|
||||||
|
}
|
||||||
|
esClient, err := elasticsearch.NewConnectedClient(esConfig)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error creating ES client: %v", err)
|
||||||
|
}
|
||||||
|
defer esClient.Close()
|
||||||
|
|
||||||
|
err = fb.moduleRegistry.LoadPipelines(esClient)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// Run allows the beater to be run as a beat.
|
// Run allows the beater to be run as a beat.
|
||||||
func (fb *Filebeat) Run(b *beat.Beat) error {
|
func (fb *Filebeat) Run(b *beat.Beat) error {
|
||||||
var err error
|
var err error
|
||||||
config := fb.config
|
config := fb.config
|
||||||
|
|
||||||
|
if !fb.moduleRegistry.Empty() {
|
||||||
|
err = fb.modulesSetup(b)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
waitFinished := newSignalWait()
|
waitFinished := newSignalWait()
|
||||||
waitEvents := newSignalWait()
|
waitEvents := newSignalWait()
|
||||||
|
|
||||||
|
@ -76,7 +133,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
crawler, err := crawler.New(newSpoolerOutlet(fb.done, spooler, wgEvents), config.Prospectors)
|
crawler, err := crawler.New(newSpoolerOutlet(fb.done, spooler, wgEvents), config.Prospectors, fb.done, *once)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logp.Err("Could not init crawler: %v", err)
|
logp.Err("Could not init crawler: %v", err)
|
||||||
return err
|
return err
|
||||||
|
@ -118,7 +175,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error {
|
||||||
spooler.Stop()
|
spooler.Stop()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
err = crawler.Start(registrar.GetStates(), *once)
|
err = crawler.Start(registrar, config.ProspectorReload)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,90 @@
|
||||||
|
package channel
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"github.com/elastic/beats/filebeat/input"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Outlet struct is used to be passed to an object which needs an outlet
|
||||||
|
//
|
||||||
|
// The difference between signal and done channel is as following:
|
||||||
|
// - signal channel can be added through SetSignal and is used to
|
||||||
|
// interrupt events sent through OnEventSignal-
|
||||||
|
// - done channel is used to close and stop the outlet
|
||||||
|
//
|
||||||
|
// If SetSignal is used, it must be ensure that there is only one event producer.
|
||||||
|
type Outlet struct {
|
||||||
|
wg *sync.WaitGroup // Use for counting active events
|
||||||
|
done <-chan struct{}
|
||||||
|
signal <-chan struct{}
|
||||||
|
channel chan *input.Event
|
||||||
|
isOpen int32 // atomic indicator
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewOutlet(
|
||||||
|
done <-chan struct{},
|
||||||
|
c chan *input.Event,
|
||||||
|
wg *sync.WaitGroup,
|
||||||
|
) *Outlet {
|
||||||
|
return &Outlet{
|
||||||
|
done: done,
|
||||||
|
channel: c,
|
||||||
|
wg: wg,
|
||||||
|
isOpen: 1,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSignal sets the signal channel for OnEventSignal
|
||||||
|
// If SetSignal is used, it must be ensure that only one producer exists.
|
||||||
|
func (o *Outlet) SetSignal(signal <-chan struct{}) {
|
||||||
|
o.signal = signal
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *Outlet) OnEvent(event *input.Event) bool {
|
||||||
|
open := atomic.LoadInt32(&o.isOpen) == 1
|
||||||
|
if !open {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if o.wg != nil {
|
||||||
|
o.wg.Add(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-o.done:
|
||||||
|
if o.wg != nil {
|
||||||
|
o.wg.Done()
|
||||||
|
}
|
||||||
|
atomic.StoreInt32(&o.isOpen, 0)
|
||||||
|
return false
|
||||||
|
case o.channel <- event:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnEventSignal can be stopped by the signal that is set with SetSignal
|
||||||
|
// This does not close the outlet. Only OnEvent does close the outlet.
|
||||||
|
// If OnEventSignal is used, it must be ensured that only one producer is used.
|
||||||
|
func (o *Outlet) OnEventSignal(event *input.Event) bool {
|
||||||
|
open := atomic.LoadInt32(&o.isOpen) == 1
|
||||||
|
if !open {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if o.wg != nil {
|
||||||
|
o.wg.Add(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-o.signal:
|
||||||
|
if o.wg != nil {
|
||||||
|
o.wg.Done()
|
||||||
|
}
|
||||||
|
o.signal = nil
|
||||||
|
return false
|
||||||
|
case o.channel <- event:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,7 +1,6 @@
|
||||||
package config
|
package config
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
@ -19,13 +18,15 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
type Config struct {
|
type Config struct {
|
||||||
Prospectors []*common.Config `config:"prospectors"`
|
Prospectors []*common.Config `config:"prospectors"`
|
||||||
SpoolSize uint64 `config:"spool_size" validate:"min=1"`
|
SpoolSize uint64 `config:"spool_size" validate:"min=1"`
|
||||||
PublishAsync bool `config:"publish_async"`
|
PublishAsync bool `config:"publish_async"`
|
||||||
IdleTimeout time.Duration `config:"idle_timeout" validate:"nonzero,min=0s"`
|
IdleTimeout time.Duration `config:"idle_timeout" validate:"nonzero,min=0s"`
|
||||||
RegistryFile string `config:"registry_file"`
|
RegistryFile string `config:"registry_file"`
|
||||||
ConfigDir string `config:"config_dir"`
|
ConfigDir string `config:"config_dir"`
|
||||||
ShutdownTimeout time.Duration `config:"shutdown_timeout"`
|
ShutdownTimeout time.Duration `config:"shutdown_timeout"`
|
||||||
|
Modules []*common.Config `config:"modules"`
|
||||||
|
ProspectorReload *common.Config `config:"config.prospectors"`
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -125,11 +126,5 @@ func (config *Config) FetchConfigs() error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(config.Prospectors) == 0 {
|
|
||||||
err := errors.New("No paths given. What files do you want me to watch?")
|
|
||||||
log.Fatalf("%v", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -6,79 +6,109 @@ import (
|
||||||
|
|
||||||
"github.com/elastic/beats/filebeat/input/file"
|
"github.com/elastic/beats/filebeat/input/file"
|
||||||
"github.com/elastic/beats/filebeat/prospector"
|
"github.com/elastic/beats/filebeat/prospector"
|
||||||
|
"github.com/elastic/beats/filebeat/registrar"
|
||||||
|
"github.com/elastic/beats/libbeat/cfgfile"
|
||||||
"github.com/elastic/beats/libbeat/common"
|
"github.com/elastic/beats/libbeat/common"
|
||||||
"github.com/elastic/beats/libbeat/logp"
|
"github.com/elastic/beats/libbeat/logp"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Crawler struct {
|
type Crawler struct {
|
||||||
prospectors []*prospector.Prospector
|
prospectors map[uint64]*prospector.Prospector
|
||||||
prospectorConfigs []*common.Config
|
prospectorConfigs []*common.Config
|
||||||
out prospector.Outlet
|
out prospector.Outlet
|
||||||
wg sync.WaitGroup
|
wg sync.WaitGroup
|
||||||
|
reloader *cfgfile.Reloader
|
||||||
|
once bool
|
||||||
|
beatDone chan struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(out prospector.Outlet, prospectorConfigs []*common.Config) (*Crawler, error) {
|
func New(out prospector.Outlet, prospectorConfigs []*common.Config, beatDone chan struct{}, once bool) (*Crawler, error) {
|
||||||
|
|
||||||
if len(prospectorConfigs) == 0 {
|
|
||||||
return nil, fmt.Errorf("No prospectors defined. You must have at least one prospector defined in the config file.")
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Crawler{
|
return &Crawler{
|
||||||
out: out,
|
out: out,
|
||||||
|
prospectors: map[uint64]*prospector.Prospector{},
|
||||||
prospectorConfigs: prospectorConfigs,
|
prospectorConfigs: prospectorConfigs,
|
||||||
|
once: once,
|
||||||
|
beatDone: beatDone,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Crawler) Start(states file.States, once bool) error {
|
func (c *Crawler) Start(r *registrar.Registrar, reloaderConfig *common.Config) error {
|
||||||
|
|
||||||
logp.Info("Loading Prospectors: %v", len(c.prospectorConfigs))
|
logp.Info("Loading Prospectors: %v", len(c.prospectorConfigs))
|
||||||
|
|
||||||
// Prospect the globs/paths given on the command line and launch harvesters
|
// Prospect the globs/paths given on the command line and launch harvesters
|
||||||
for _, prospectorConfig := range c.prospectorConfigs {
|
for _, prospectorConfig := range c.prospectorConfigs {
|
||||||
|
err := c.startProspector(prospectorConfig, r.GetStates())
|
||||||
prospector, err := prospector.NewProspector(prospectorConfig, states, c.out)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error in initing prospector: %s", err)
|
return err
|
||||||
}
|
|
||||||
if prospector.IsEnabled() {
|
|
||||||
c.prospectors = append(c.prospectors, prospector)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
logp.Info("Loading Prospectors completed. Number of prospectors: %v", len(c.prospectors))
|
if reloaderConfig.Enabled() {
|
||||||
|
logp.Warn("BETA feature dynamic configuration reloading is enabled.")
|
||||||
|
|
||||||
for i, p := range c.prospectors {
|
c.reloader = cfgfile.NewReloader(reloaderConfig)
|
||||||
c.wg.Add(1)
|
factory := prospector.NewFactory(c.out, r, c.beatDone)
|
||||||
|
go func() {
|
||||||
go func(id int, prospector *prospector.Prospector) {
|
c.reloader.Run(factory)
|
||||||
defer func() {
|
}()
|
||||||
c.wg.Done()
|
|
||||||
logp.Debug("crawler", "Prospector %v stopped", id)
|
|
||||||
}()
|
|
||||||
logp.Debug("crawler", "Starting prospector %v", id)
|
|
||||||
prospector.Run(once)
|
|
||||||
}(i, p)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
logp.Info("All prospectors are initialised and running with %d states to persist", states.Count())
|
logp.Info("Loading and starting Prospectors completed. Enabled prospectors: %v", len(c.prospectors))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Crawler) startProspector(config *common.Config, states []file.State) error {
|
||||||
|
if !config.Enabled() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
p, err := prospector.NewProspector(config, c.out, c.beatDone)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error in initing prospector: %s", err)
|
||||||
|
}
|
||||||
|
p.Once = c.once
|
||||||
|
|
||||||
|
if _, ok := c.prospectors[p.ID()]; ok {
|
||||||
|
return fmt.Errorf("Prospector with same ID already exists: %v", p.ID())
|
||||||
|
}
|
||||||
|
|
||||||
|
err = p.LoadStates(states)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error loading states for propsector %v: %v", p.ID(), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.prospectors[p.ID()] = p
|
||||||
|
|
||||||
|
p.Start()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Crawler) Stop() {
|
func (c *Crawler) Stop() {
|
||||||
logp.Info("Stopping Crawler")
|
logp.Info("Stopping Crawler")
|
||||||
stopProspector := func(p *prospector.Prospector) {
|
|
||||||
defer c.wg.Done()
|
asyncWaitStop := func(stop func()) {
|
||||||
p.Stop()
|
c.wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer c.wg.Done()
|
||||||
|
stop()
|
||||||
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
logp.Info("Stopping %v prospectors", len(c.prospectors))
|
logp.Info("Stopping %v prospectors", len(c.prospectors))
|
||||||
for _, p := range c.prospectors {
|
for _, p := range c.prospectors {
|
||||||
// Stop prospectors in parallel
|
// Stop prospectors in parallel
|
||||||
c.wg.Add(1)
|
asyncWaitStop(p.Stop)
|
||||||
go stopProspector(p)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if c.reloader != nil {
|
||||||
|
asyncWaitStop(c.reloader.Stop)
|
||||||
|
}
|
||||||
|
|
||||||
c.WaitForCompletion()
|
c.WaitForCompletion()
|
||||||
|
|
||||||
logp.Info("Crawler stopped")
|
logp.Info("Crawler stopped")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1,18 +0,0 @@
|
||||||
// +build !integration
|
|
||||||
|
|
||||||
package crawler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/elastic/beats/libbeat/common"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestNewCrawlerNoProspectorsError(t *testing.T) {
|
|
||||||
prospectorConfigs := []*common.Config{}
|
|
||||||
|
|
||||||
_, error := New(nil, prospectorConfigs)
|
|
||||||
|
|
||||||
assert.Error(t, error)
|
|
||||||
}
|
|
|
@ -1,8 +1,13 @@
|
||||||
[[filebeat-command-line]]
|
[[filebeat-command-line]]
|
||||||
=== Command Line Options
|
=== Command Line Options
|
||||||
|
|
||||||
Filebeat does not have any Filebeat-specific command line options. Instead, you
|
The following command line option is specific to Filebeat.
|
||||||
configure the behaviour of Filebeat by specifying options in the configuration file.
|
|
||||||
|
*`-once`*::
|
||||||
|
When the `-once` flag is used, Filebeat starts all configured harvesters and prospectors, and runs
|
||||||
|
each prospector until the harvesters are closed. If you set the `-once` flag, you should also set
|
||||||
|
`close_eof` so the harvester is closed when the end of the file is reached.
|
||||||
|
By default harvesters are closed after `close_inactive` is reached.
|
||||||
|
|
||||||
The following command line options from libbeat are also available for Filebeat. To
|
The following command line options from libbeat are also available for Filebeat. To
|
||||||
use these options, you need to start Filebeat in the foreground.
|
use these options, you need to start Filebeat in the foreground.
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue